get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
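
A minimal sketch of reading this endpoint from Python, using the third-party "requests" library (an assumption; any HTTP client works). The base URL and patch ID are taken from the example exchange below; updating a patch via PUT or PATCH would additionally require an authenticated API token.

    # Minimal sketch: fetch one patch object from the Patchwork REST API.
    # Assumes the `requests` library is installed; endpoint and ID match
    # the example GET exchange shown below.
    import requests

    BASE = "http://patchwork.ozlabs.org/api"

    resp = requests.get(
        f"{BASE}/patches/806406/",
        headers={"Accept": "application/json"},  # ask for JSON, not the browsable view
    )
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])   # "[02/14] hvf: add code base from Google's QEMU repository"
    print(patch["state"])  # e.g. "new"
    print(patch["mbox"])   # mbox URL, suitable for downloading and applying with `git am`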

GET /api/patches/806406/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 806406,
    "url": "http://patchwork.ozlabs.org/api/patches/806406/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20170828015654.2530-3-Sergio.G.DelReal@gmail.com/",
    "project": {
        "id": 14,
        "url": "http://patchwork.ozlabs.org/api/projects/14/?format=api",
        "name": "QEMU Development",
        "link_name": "qemu-devel",
        "list_id": "qemu-devel.nongnu.org",
        "list_email": "qemu-devel@nongnu.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20170828015654.2530-3-Sergio.G.DelReal@gmail.com>",
    "list_archive_url": null,
    "date": "2017-08-28T01:56:42",
    "name": "[02/14] hvf: add code base from Google's QEMU repository",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "f6b2a8828517e256aac02df680378b1750782a24",
    "submitter": {
        "id": 70675,
        "url": "http://patchwork.ozlabs.org/api/people/70675/?format=api",
        "name": "Sergio Andres Gomez Del Real",
        "email": "sergio.g.delreal@gmail.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20170828015654.2530-3-Sergio.G.DelReal@gmail.com/mbox/",
    "series": [
        {
            "id": 56,
            "url": "http://patchwork.ozlabs.org/api/series/56/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/list/?series=56",
            "date": "2017-08-28T01:56:40",
            "name": "add support for Hypervisor.framework in QEMU",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/56/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/806406/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/806406/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>",
        "X-Original-To": "incoming@patchwork.ozlabs.org",
        "Delivered-To": "patchwork-incoming@bilbo.ozlabs.org",
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=nongnu.org\n\t(client-ip=2001:4830:134:3::11; helo=lists.gnu.org;\n\tenvelope-from=qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n\tunprotected) header.d=gmail.com header.i=@gmail.com\n\theader.b=\"PGzb7KPy\"; dkim-atps=neutral"
        ],
        "Received": [
            "from lists.gnu.org (lists.gnu.org [IPv6:2001:4830:134:3::11])\n\t(using TLSv1 with cipher AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3xgjkV2w0lz9sNc\n\tfor <incoming@patchwork.ozlabs.org>;\n\tMon, 28 Aug 2017 17:16:48 +1000 (AEST)",
            "from localhost ([::1]:37270 helo=lists.gnu.org)\n\tby lists.gnu.org with esmtp (Exim 4.71) (envelope-from\n\t<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>)\n\tid 1dmEHt-0006Pc-DY\n\tfor incoming@patchwork.ozlabs.org; Mon, 28 Aug 2017 03:16:45 -0400",
            "from eggs.gnu.org ([2001:4830:134:3::10]:42138)\n\tby lists.gnu.org with esmtp (Exim 4.71)\n\t(envelope-from <sergio.g.delreal@gmail.com>) id 1dm9JI-00057z-Kp\n\tfor qemu-devel@nongnu.org; Sun, 27 Aug 2017 21:58:17 -0400",
            "from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71)\n\t(envelope-from <sergio.g.delreal@gmail.com>) id 1dm9Iu-0000zc-7E\n\tfor qemu-devel@nongnu.org; Sun, 27 Aug 2017 21:57:52 -0400",
            "from mail-ua0-x231.google.com ([2607:f8b0:400c:c08::231]:37984)\n\tby eggs.gnu.org with esmtps (TLS1.0:RSA_AES_128_CBC_SHA1:16)\n\t(Exim 4.71) (envelope-from <sergio.g.delreal@gmail.com>)\n\tid 1dm9It-0000z7-Nm\n\tfor qemu-devel@nongnu.org; Sun, 27 Aug 2017 21:57:28 -0400",
            "by mail-ua0-x231.google.com with SMTP id w17so13088148uaw.5\n\tfor <qemu-devel@nongnu.org>; Sun, 27 Aug 2017 18:57:27 -0700 (PDT)",
            "from localhost.localdomain ([191.109.6.191])\n\tby smtp.gmail.com with ESMTPSA id\n\ty12sm2696824uad.22.2017.08.27.18.57.18\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tSun, 27 Aug 2017 18:57:24 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=kh3OGIyAGHpeIvsrBjdSX6fpAB7OAQN/loIcRXCca2w=;\n\tb=PGzb7KPyCffSDd9BTbtqC1H454sC92sO3O4pcuGDiRD50xBnA87zhp7RsbCkOGQ+ez\n\t8mZ5uxXZkulF63WEzBO9p8Ntf6U0Qbxg+c4dyKWN7zK/ArpR9tWlorWoi77LFUZSMY9G\n\tM3LeEdUtwouD+Dl3as2izxKwQIJ7gVjxPT3wWYQQUddb174REgIMUTMr0k6M2TZZhxvw\n\tqDKvUHVRL5ByVoVfiB64eUhrTl4vzZx+EnSBdj03PZWWQyxtklAJVH2rAWrT8bc0joLV\n\tfIGq8X2aWGfF7sdqKh3Kz81VTHUcAc5HpXXWI9FShaqgacdH/8YHpRNmmRD2g4vvtA//\n\tQX5g==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=kh3OGIyAGHpeIvsrBjdSX6fpAB7OAQN/loIcRXCca2w=;\n\tb=Y20iS3eVvv803myCUoAqmsKT9BVZYiZaxZ4Lno+4Ibo6Ux7oVPCQKPWjhi7Wp3iq0r\n\tGydVB5tNaS6Ma5o/vuwxj/7omaH4IZiS4O3X1OOd2gn4Yk/o3Pf7EEkJHFHIv2B/0tVM\n\teoV0nQmP4+QAACiQKRdoDdTOcN31NCbtIHr0BGZo2kdznUJyYmRxuPab0H3tPYTmrXYy\n\tVSrR+cvT2Mp9X/hYngPBRWHKhTPsAgqqM1qJ9zK/RwE9DNgwTTeP1TpviKbnGrOYwknk\n\tnjNVb/sEVKDdgKBkXG8HOLHku6ZD0BkAg7Wld0lG3vVY34AHlS/PpcgZ4CNvtdzdRTdr\n\tg+Fg==",
        "X-Gm-Message-State": "AHYfb5gjm9/4OZexUPRYj3VGaTF/366LYXKBO/BQN/vQBPwrpP/bjaJZ\n\tjGvRrpJ0MErGkd7E",
        "X-Received": "by 10.176.24.200 with SMTP id d8mr3990414uah.81.1503885444935;\n\tSun, 27 Aug 2017 18:57:24 -0700 (PDT)",
        "From": "Sergio Andres Gomez Del Real <sergio.g.delreal@gmail.com>",
        "X-Google-Original-From": "Sergio Andres Gomez Del Real\n\t<Sergio.G.DelReal@gmail.com>",
        "To": "qemu-devel@nongnu.org",
        "Date": "Sun, 27 Aug 2017 20:56:42 -0500",
        "Message-Id": "<20170828015654.2530-3-Sergio.G.DelReal@gmail.com>",
        "X-Mailer": "git-send-email 2.11.0",
        "In-Reply-To": "<20170828015654.2530-1-Sergio.G.DelReal@gmail.com>",
        "References": "<20170828015654.2530-1-Sergio.G.DelReal@gmail.com>",
        "X-detected-operating-system": "by eggs.gnu.org: Genre and OS details not\n\trecognized.",
        "X-Received-From": "2607:f8b0:400c:c08::231",
        "X-Mailman-Approved-At": "Mon, 28 Aug 2017 03:16:05 -0400",
        "Subject": "[Qemu-devel] [PATCH 02/14] hvf: add code base from Google's QEMU\n\trepository",
        "X-BeenThere": "qemu-devel@nongnu.org",
        "X-Mailman-Version": "2.1.21",
        "Precedence": "list",
        "List-Id": "<qemu-devel.nongnu.org>",
        "List-Unsubscribe": "<https://lists.nongnu.org/mailman/options/qemu-devel>,\n\t<mailto:qemu-devel-request@nongnu.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.nongnu.org/archive/html/qemu-devel/>",
        "List-Post": "<mailto:qemu-devel@nongnu.org>",
        "List-Help": "<mailto:qemu-devel-request@nongnu.org?subject=help>",
        "List-Subscribe": "<https://lists.nongnu.org/mailman/listinfo/qemu-devel>,\n\t<mailto:qemu-devel-request@nongnu.org?subject=subscribe>",
        "Cc": "Sergio Andres Gomez Del Real <Sergio.G.DelReal@gmail.com>",
        "Errors-To": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org",
        "Sender": "\"Qemu-devel\"\n\t<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>"
    },
    "content": "This file begins tracking the files that will be the code base for HVF\nsupport in QEMU.\n\nSigned-off-by: Sergio Andres Gomez Del Real <Sergio.G.DelReal@gmail.com>\n---\n cpus.c                              |   42 +\n include/sysemu/hvf.h                |   96 ++\n target/i386/hvf-all.c               |  982 +++++++++++++++++++++\n target/i386/hvf-i386.h              |   48 +\n target/i386/hvf-utils/Makefile.objs |    1 +\n target/i386/hvf-utils/README.md     |    7 +\n target/i386/hvf-utils/vmcs.h        |  368 ++++++++\n target/i386/hvf-utils/vmx.h         |  200 +++++\n target/i386/hvf-utils/x86.c         |  174 ++++\n target/i386/hvf-utils/x86.h         |  470 ++++++++++\n target/i386/hvf-utils/x86_cpuid.c   |  270 ++++++\n target/i386/hvf-utils/x86_cpuid.h   |   51 ++\n target/i386/hvf-utils/x86_decode.c  | 1659 +++++++++++++++++++++++++++++++++++\n target/i386/hvf-utils/x86_decode.h  |  314 +++++++\n target/i386/hvf-utils/x86_descr.c   |  124 +++\n target/i386/hvf-utils/x86_descr.h   |   40 +\n target/i386/hvf-utils/x86_emu.c     | 1466 +++++++++++++++++++++++++++++++\n target/i386/hvf-utils/x86_emu.h     |   16 +\n target/i386/hvf-utils/x86_flags.c   |  317 +++++++\n target/i386/hvf-utils/x86_flags.h   |  218 +++++\n target/i386/hvf-utils/x86_gen.h     |   36 +\n target/i386/hvf-utils/x86_mmu.c     |  254 ++++++\n target/i386/hvf-utils/x86_mmu.h     |   28 +\n target/i386/hvf-utils/x86hvf.c      |  501 +++++++++++\n target/i386/hvf-utils/x86hvf.h      |   19 +\n 25 files changed, 7701 insertions(+)\n create mode 100644 include/sysemu/hvf.h\n create mode 100644 target/i386/hvf-all.c\n create mode 100644 target/i386/hvf-i386.h\n create mode 100644 target/i386/hvf-utils/Makefile.objs\n create mode 100644 target/i386/hvf-utils/README.md\n create mode 100644 target/i386/hvf-utils/vmcs.h\n create mode 100644 target/i386/hvf-utils/vmx.h\n create mode 100644 target/i386/hvf-utils/x86.c\n create mode 100644 target/i386/hvf-utils/x86.h\n create mode 100644 target/i386/hvf-utils/x86_cpuid.c\n create mode 100644 target/i386/hvf-utils/x86_cpuid.h\n create mode 100644 target/i386/hvf-utils/x86_decode.c\n create mode 100644 target/i386/hvf-utils/x86_decode.h\n create mode 100644 target/i386/hvf-utils/x86_descr.c\n create mode 100644 target/i386/hvf-utils/x86_descr.h\n create mode 100644 target/i386/hvf-utils/x86_emu.c\n create mode 100644 target/i386/hvf-utils/x86_emu.h\n create mode 100644 target/i386/hvf-utils/x86_flags.c\n create mode 100644 target/i386/hvf-utils/x86_flags.h\n create mode 100644 target/i386/hvf-utils/x86_gen.h\n create mode 100644 target/i386/hvf-utils/x86_mmu.c\n create mode 100644 target/i386/hvf-utils/x86_mmu.h\n create mode 100644 target/i386/hvf-utils/x86hvf.c\n create mode 100644 target/i386/hvf-utils/x86hvf.h",
    "diff": "diff --git a/cpus.c b/cpus.c\nindex 9bed61eefc..a2cd9dfa5d 100644\n--- a/cpus.c\n+++ b/cpus.c\n@@ -1434,6 +1434,48 @@ static void *qemu_hax_cpu_thread_fn(void *arg)\n     return NULL;\n }\n \n+/* The HVF-specific vCPU thread function. This one should only run when the host\n+ * CPU supports the VMX \"unrestricted guest\" feature. */\n+static void *qemu_hvf_cpu_thread_fn(void *arg)\n+{\n+    CPUState *cpu = arg;\n+\n+    int r;\n+\n+    assert(hvf_enabled());\n+\n+    rcu_register_thread();\n+\n+    qemu_mutex_lock_iothread();\n+    qemu_thread_get_self(cpu->thread);\n+\n+    cpu->thread_id = qemu_get_thread_id();\n+    cpu->can_do_io = 1;\n+    current_cpu = cpu;\n+\n+    hvf_init_vcpu(cpu);\n+\n+    /* signal CPU creation */\n+    cpu->created = true;\n+    qemu_cond_signal(&qemu_cpu_cond);\n+\n+    do {\n+        if (cpu_can_run(cpu)) {\n+            r = hvf_vcpu_exec(cpu);\n+            if (r == EXCP_DEBUG) {\n+                cpu_handle_guest_debug(cpu);\n+            }\n+        }\n+        qemu_hvf_wait_io_event(cpu);\n+    } while (!cpu->unplug || cpu_can_run(cpu));\n+\n+    hvf_vcpu_destroy(cpu);\n+    cpu->created = false;\n+    qemu_cond_signal(&qemu_cpu_cond);\n+    qemu_mutex_unlock_iothread();\n+    return NULL;\n+}\n+\n #ifdef _WIN32\n static void CALLBACK dummy_apc_func(ULONG_PTR unused)\n {\ndiff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h\nnew file mode 100644\nindex 0000000000..b96bd5e8ba\n--- /dev/null\n+++ b/include/sysemu/hvf.h\n@@ -0,0 +1,96 @@\n+/*\n+ * QEMU Hypervisor.framework (HVF) support\n+ *\n+ * Copyright Google Inc., 2017\n+ *\n+ * This work is licensed under the terms of the GNU GPL, version 2 or later.\n+ * See the COPYING file in the top-level directory.\n+ *\n+ */\n+\n+/* header to be included in non-HVF-specific code */\n+#ifndef _HVF_H\n+#define _HVF_H\n+\n+#include \"config-host.h\"\n+#include \"qemu/osdep.h\"\n+#include \"qemu-common.h\"\n+#include \"hw/hw.h\"\n+#include \"target/i386/cpu.h\"\n+#include \"qemu/bitops.h\"\n+#include \"exec/memory.h\"\n+#include \"sysemu/accel.h\"\n+#include <Hypervisor/hv.h>\n+#include <Hypervisor/hv_vmx.h>\n+#include <Hypervisor/hv_error.h>\n+\n+\n+typedef struct hvf_slot {\n+    uint64_t start;\n+    uint64_t size;\n+    uint8_t *mem;\n+    int slot_id;\n+} hvf_slot;\n+\n+typedef struct HVFState {\n+    AccelState parent;\n+    hvf_slot slots[32];\n+    int num_slots;\n+} HVFState;\n+\n+struct hvf_vcpu_caps {\n+    uint64_t vmx_cap_pinbased;\n+    uint64_t vmx_cap_procbased;\n+    uint64_t vmx_cap_procbased2;\n+    uint64_t vmx_cap_entry;\n+    uint64_t vmx_cap_exit;\n+    uint64_t vmx_cap_preemption_timer;\n+};\n+\n+int __hvf_set_memory(hvf_slot *);\n+void hvf_set_phys_mem(MemoryRegionSection *, bool);\n+void hvf_handle_io(CPUArchState *, uint16_t, void *,\n+                  int, int, int);\n+hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);\n+\n+/* Returns 1 if HVF is available and enabled, 0 otherwise. */\n+int hvf_enabled(void);\n+\n+/* Disable HVF if |disable| is 1, otherwise, enable it iff it is supported by the host CPU.\n+ * Use hvf_enabled() after this to get the result. */\n+void hvf_disable(int disable);\n+\n+/* Returns non-0 if the host CPU supports the VMX \"unrestricted guest\" feature which\n+ * allows the virtual CPU to directly run in \"real mode\". If true, this allows QEMU to run\n+ * several vCPU threads in parallel (see cpus.c). 
Otherwise, only a a single TCG thread\n+ * can run, and it will call HVF to run the current instructions, except in case of\n+ * \"real mode\" (paging disabled, typically at boot time), or MMIO operations. */\n+// int hvf_ug_platform(void); does not apply to HVF; assume we must be in UG mode\n+\n+int hvf_sync_vcpus(void);\n+\n+int hvf_init_vcpu(CPUState *);\n+int hvf_vcpu_exec(CPUState *);\n+int hvf_smp_cpu_exec(CPUState *);\n+void hvf_cpu_synchronize_state(CPUState *);\n+void hvf_cpu_synchronize_post_reset(CPUState *);\n+void hvf_cpu_synchronize_post_init(CPUState *);\n+void _hvf_cpu_synchronize_post_init(CPUState *, run_on_cpu_data);\n+\n+void hvf_vcpu_destroy(CPUState *);\n+void hvf_raise_event(CPUState *);\n+// void hvf_reset_vcpu_state(void *opaque);\n+void vmx_reset_vcpu(CPUState *);\n+void __hvf_cpu_synchronize_state(CPUState *, run_on_cpu_data);\n+void __hvf_cpu_synchronize_post_reset(CPUState *, run_on_cpu_data);\n+void vmx_update_tpr(CPUState *);\n+void update_apic_tpr(CPUState *);\n+int apic_get_highest_priority_irr(DeviceState *);\n+int hvf_put_registers(CPUState *);\n+\n+#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME(\"hvf\")\n+\n+#define HVF_STATE(obj) \\\n+    OBJECT_CHECK(HVFState, (obj), TYPE_HVF_ACCEL)\n+\n+#endif\ndiff --git a/target/i386/hvf-all.c b/target/i386/hvf-all.c\nnew file mode 100644\nindex 0000000000..06cd8429eb\n--- /dev/null\n+++ b/target/i386/hvf-all.c\n@@ -0,0 +1,982 @@\n+#include \"qemu/osdep.h\"\n+#include \"qemu-common.h\"\n+\n+#include \"sysemu/hvf.h\"\n+#include \"hvf-i386.h\"\n+#include \"hvf-utils/vmcs.h\"\n+#include \"hvf-utils/vmx.h\"\n+#include \"hvf-utils/x86.h\"\n+#include \"hvf-utils/x86_descr.h\"\n+#include \"hvf-utils/x86_mmu.h\"\n+#include \"hvf-utils/x86_decode.h\"\n+#include \"hvf-utils/x86_emu.h\"\n+#include \"hvf-utils/x86_cpuid.h\"\n+#include \"hvf-utils/x86hvf.h\"\n+\n+#include <Hypervisor/hv.h>\n+#include <Hypervisor/hv_vmx.h>\n+\n+#include \"exec/address-spaces.h\"\n+#include \"exec/exec-all.h\"\n+#include \"exec/ioport.h\"\n+#include \"hw/i386/apic_internal.h\"\n+#include \"hw/boards.h\"\n+#include \"qemu/main-loop.h\"\n+#include \"strings.h\"\n+#include \"trace.h\"\n+#include \"sysemu/accel.h\"\n+#include \"sysemu/sysemu.h\"\n+#include \"target/i386/cpu.h\"\n+\n+pthread_rwlock_t mem_lock = PTHREAD_RWLOCK_INITIALIZER;\n+HVFState *hvf_state;\n+static int hvf_disabled = 1;\n+\n+static void assert_hvf_ok(hv_return_t ret)\n+{\n+    if (ret == HV_SUCCESS)\n+        return;\n+\n+    switch (ret) {\n+        case HV_ERROR:\n+            fprintf(stderr, \"Error: HV_ERROR\\n\");\n+            break;\n+        case HV_BUSY:\n+            fprintf(stderr, \"Error: HV_BUSY\\n\");\n+            break;\n+        case HV_BAD_ARGUMENT:\n+            fprintf(stderr, \"Error: HV_BAD_ARGUMENT\\n\");\n+            break;\n+        case HV_NO_RESOURCES:\n+            fprintf(stderr, \"Error: HV_NO_RESOURCES\\n\");\n+            break;\n+        case HV_NO_DEVICE:\n+            fprintf(stderr, \"Error: HV_NO_DEVICE\\n\");\n+            break;\n+        case HV_UNSUPPORTED:\n+            fprintf(stderr, \"Error: HV_UNSUPPORTED\\n\");\n+            break;\n+        default:\n+            fprintf(stderr, \"Unknown Error\\n\");\n+    }\n+\n+    abort();\n+}\n+\n+// Memory slots/////////////////////////////////////////////////////////////////\n+\n+hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t end) {\n+    hvf_slot *slot;\n+    int x;\n+    for (x = 0; x < hvf_state->num_slots; ++x) {\n+        slot = &hvf_state->slots[x];\n+        if (slot->size && start 
< (slot->start + slot->size) && end > slot->start)\n+            return slot;\n+    }\n+    return NULL;\n+}\n+\n+struct mac_slot {\n+    int present;\n+    uint64_t size;\n+    uint64_t gpa_start;\n+    uint64_t gva;\n+};\n+\n+struct mac_slot mac_slots[32];\n+#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))\n+\n+int __hvf_set_memory(hvf_slot *slot)\n+{\n+    struct mac_slot *macslot;\n+    hv_memory_flags_t flags;\n+    pthread_rwlock_wrlock(&mem_lock);\n+    hv_return_t ret;\n+\n+    macslot = &mac_slots[slot->slot_id];\n+\n+    if (macslot->present) {\n+        if (macslot->size != slot->size) {\n+            macslot->present = 0;\n+            ret = hv_vm_unmap(macslot->gpa_start, macslot->size);\n+            assert_hvf_ok(ret);\n+        }\n+    }\n+\n+    if (!slot->size) {\n+        pthread_rwlock_unlock(&mem_lock);\n+        return 0;\n+    }\n+\n+    flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;\n+\n+    macslot->present = 1;\n+    macslot->gpa_start = slot->start;\n+    macslot->size = slot->size;\n+    ret = hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size, flags);\n+    assert_hvf_ok(ret);\n+    pthread_rwlock_unlock(&mem_lock);\n+    return 0;\n+}\n+\n+void hvf_set_phys_mem(MemoryRegionSection* section, bool add)\n+{\n+    hvf_slot *mem;\n+    MemoryRegion *area = section->mr;\n+\n+    if (!memory_region_is_ram(area)) return;\n+\n+    mem = hvf_find_overlap_slot(\n+            section->offset_within_address_space,\n+            section->offset_within_address_space + int128_get64(section->size));\n+\n+    if (mem && add) {\n+        if (mem->size == int128_get64(section->size) &&\n+                mem->start == section->offset_within_address_space &&\n+                mem->mem == (memory_region_get_ram_ptr(area) + section->offset_within_region))\n+            return; // Same region was attempted to register, go away.\n+    }\n+\n+    // Region needs to be reset. set the size to 0 and remap it.\n+    if (mem) {\n+        mem->size = 0;\n+        if (__hvf_set_memory(mem)) {\n+            fprintf(stderr, \"Failed to reset overlapping slot\\n\");\n+            abort();\n+        }\n+    }\n+\n+    if (!add) return;\n+\n+    // Now make a new slot.\n+    int x;\n+\n+    for (x = 0; x < hvf_state->num_slots; ++x) {\n+        mem = &hvf_state->slots[x];\n+        if (!mem->size)\n+            break;\n+    }\n+\n+    if (x == hvf_state->num_slots) {\n+        fprintf(stderr, \"No free slots\\n\");\n+        abort();\n+    }\n+\n+    mem->size = int128_get64(section->size);\n+    mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;\n+    mem->start = section->offset_within_address_space;\n+\n+    if (__hvf_set_memory(mem)) {\n+        fprintf(stderr, \"Error registering new memory slot\\n\");\n+        abort();\n+    }\n+}\n+\n+/* return -1 if no bit is set */\n+static int get_highest_priority_int(uint32_t *tab)\n+{\n+    int i;\n+    for (i = 7; i >= 0; i--) {\n+        if (tab[i] != 0) {\n+            return i * 32 + apic_fls_bit(tab[i]);\n+        }\n+    }\n+    return -1;\n+}\n+\n+void vmx_update_tpr(CPUState *cpu)\n+{\n+    // TODO: need integrate APIC handling\n+    X86CPU *x86_cpu = X86_CPU(cpu);\n+    int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;\n+    int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);\n+\n+    wreg(cpu->hvf_fd, HV_X86_TPR, tpr);\n+    if (irr == -1)\n+        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);\n+    else\n+        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? 
tpr >> 4 : irr >> 4);\n+}\n+\n+void update_apic_tpr(CPUState *cpu)\n+{\n+    X86CPU *x86_cpu = X86_CPU(cpu);\n+    int tpr = rreg(cpu->hvf_fd, HV_X86_TPR) >> 4;\n+    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);\n+}\n+\n+#define VECTORING_INFO_VECTOR_MASK     0xff\n+\n+// TODO: taskswitch handling\n+static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)\n+{\n+    /* CR3 and ldt selector are not saved intentionally */\n+    tss->eip = EIP(cpu);\n+    tss->eflags = EFLAGS(cpu);\n+    tss->eax = EAX(cpu);\n+    tss->ecx = ECX(cpu);\n+    tss->edx = EDX(cpu);\n+    tss->ebx = EBX(cpu);\n+    tss->esp = ESP(cpu);\n+    tss->ebp = EBP(cpu);\n+    tss->esi = ESI(cpu);\n+    tss->edi = EDI(cpu);\n+\n+    tss->es = vmx_read_segment_selector(cpu, REG_SEG_ES).sel;\n+    tss->cs = vmx_read_segment_selector(cpu, REG_SEG_CS).sel;\n+    tss->ss = vmx_read_segment_selector(cpu, REG_SEG_SS).sel;\n+    tss->ds = vmx_read_segment_selector(cpu, REG_SEG_DS).sel;\n+    tss->fs = vmx_read_segment_selector(cpu, REG_SEG_FS).sel;\n+    tss->gs = vmx_read_segment_selector(cpu, REG_SEG_GS).sel;\n+}\n+\n+static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)\n+{\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);\n+\n+    RIP(cpu) = tss->eip;\n+    EFLAGS(cpu) = tss->eflags | 2;\n+\n+    /* General purpose registers */\n+    RAX(cpu) = tss->eax;\n+    RCX(cpu) = tss->ecx;\n+    RDX(cpu) = tss->edx;\n+    RBX(cpu) = tss->ebx;\n+    RSP(cpu) = tss->esp;\n+    RBP(cpu) = tss->ebp;\n+    RSI(cpu) = tss->esi;\n+    RDI(cpu) = tss->edi;\n+\n+    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, REG_SEG_LDTR);\n+    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, REG_SEG_ES);\n+    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, REG_SEG_CS);\n+    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, REG_SEG_SS);\n+    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, REG_SEG_DS);\n+    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, REG_SEG_FS);\n+    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, REG_SEG_GS);\n+\n+#if 0\n+    load_segment(cpu, REG_SEG_LDTR, tss->ldt);\n+    load_segment(cpu, REG_SEG_ES, tss->es);\n+    load_segment(cpu, REG_SEG_CS, tss->cs);\n+    load_segment(cpu, REG_SEG_SS, tss->ss);\n+    load_segment(cpu, REG_SEG_DS, tss->ds);\n+    load_segment(cpu, REG_SEG_FS, tss->fs);\n+    load_segment(cpu, REG_SEG_GS, tss->gs);\n+#endif\n+}\n+\n+static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,\n+                          uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)\n+{\n+    struct x86_tss_segment32 tss_seg;\n+    uint32_t new_tss_base = x86_segment_base(new_desc);\n+    uint32_t eip_offset = offsetof(struct x86_tss_segment32, eip);\n+    uint32_t ldt_sel_offset = offsetof(struct x86_tss_segment32, ldt);\n+\n+    vmx_read_mem(cpu, &tss_seg, old_tss_base, sizeof(tss_seg));\n+    save_state_to_tss32(cpu, &tss_seg);\n+\n+    vmx_write_mem(cpu, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset);\n+    vmx_read_mem(cpu, &tss_seg, new_tss_base, sizeof(tss_seg));\n+\n+    if (old_tss_sel.sel != 0xffff) {\n+        tss_seg.prev_tss = old_tss_sel.sel;\n+\n+        vmx_write_mem(cpu, new_tss_base, &tss_seg.prev_tss, sizeof(tss_seg.prev_tss));\n+    }\n+    load_state_from_tss32(cpu, &tss_seg);\n+    return 0;\n+}\n+\n+static void vmx_handle_task_switch(CPUState 
*cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)\n+{\n+    uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);\n+    if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&\n+                        gate_type != VMCS_INTR_T_HWINTR &&\n+                        gate_type != VMCS_INTR_T_NMI)) {\n+        int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n+        macvm_set_rip(cpu, rip + ins_len);\n+        return;\n+    }\n+\n+    load_regs(cpu);\n+\n+    struct x86_segment_descriptor curr_tss_desc, next_tss_desc;\n+    int ret;\n+    x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, REG_SEG_TR);\n+    uint64_t old_tss_base = vmx_read_segment_base(cpu, REG_SEG_TR);\n+    uint32_t desc_limit;\n+    struct x86_call_gate task_gate_desc;\n+    struct vmx_segment vmx_seg;\n+\n+    x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel);\n+    x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);\n+\n+    if (reason == TSR_IDT_GATE && gate_valid) {\n+        int dpl;\n+\n+        ret = x86_read_call_gate(cpu, &task_gate_desc, gate);\n+\n+        dpl = task_gate_desc.dpl;\n+        x68_segment_selector cs = vmx_read_segment_selector(cpu, REG_SEG_CS);\n+        if (tss_sel.rpl > dpl || cs.rpl > dpl)\n+            ;//DPRINTF(\"emulate_gp\");\n+    }\n+\n+    desc_limit = x86_segment_limit(&next_tss_desc);\n+    if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) {\n+        VM_PANIC(\"emulate_ts\");\n+    }\n+\n+    if (reason == TSR_IRET || reason == TSR_JMP) {\n+        curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */\n+        x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);\n+    }\n+\n+    if (reason == TSR_IRET)\n+        EFLAGS(cpu) &= ~RFLAGS_NT;\n+\n+    if (reason != TSR_CALL && reason != TSR_IDT_GATE)\n+        old_tss_sel.sel = 0xffff;\n+\n+    if (reason != TSR_IRET) {\n+        next_tss_desc.type |= (1 << 1); /* set busy flag */\n+        x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);\n+    }\n+\n+    if (next_tss_desc.type & 8)\n+        ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);\n+    else\n+        //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);\n+        VM_PANIC(\"task_switch_16\");\n+\n+    macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);\n+    x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);\n+    vmx_write_segment_descriptor(cpu, &vmx_seg, REG_SEG_TR);\n+\n+    store_regs(cpu);\n+\n+    hv_vcpu_invalidate_tlb(cpu->hvf_fd);\n+    hv_vcpu_flush(cpu->hvf_fd);\n+}\n+\n+static void hvf_handle_interrupt(CPUState * cpu, int mask)\n+{\n+    cpu->interrupt_request |= mask;\n+    if (!qemu_cpu_is_self(cpu)) {\n+        qemu_cpu_kick(cpu);\n+    }\n+}\n+\n+void hvf_handle_io(CPUArchState * env, uint16_t port, void* buffer,\n+                  int direction, int size, int count)\n+{\n+    int i;\n+    uint8_t *ptr = buffer;\n+\n+    for (i = 0; i < count; i++) {\n+        address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,\n+                         ptr, size,\n+                         direction);\n+        ptr += size;\n+    }\n+}\n+//\n+// TODO: synchronize vcpu state\n+void __hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)\n+{\n+    CPUState *cpu_state = cpu;//(CPUState *)data;\n+    if (cpu_state->hvf_vcpu_dirty == 0)\n+        hvf_get_registers(cpu_state);\n+\n+    
cpu_state->hvf_vcpu_dirty = 1;\n+}\n+\n+void hvf_cpu_synchronize_state(CPUState *cpu_state)\n+{\n+    if (cpu_state->hvf_vcpu_dirty == 0)\n+        run_on_cpu(cpu_state, __hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);\n+}\n+\n+void __hvf_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)\n+{\n+    CPUState *cpu_state = cpu;\n+    hvf_put_registers(cpu_state);\n+    cpu_state->hvf_vcpu_dirty = false;\n+}\n+\n+void hvf_cpu_synchronize_post_reset(CPUState *cpu_state)\n+{\n+    run_on_cpu(cpu_state, __hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);\n+}\n+\n+void _hvf_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)\n+{\n+    CPUState *cpu_state = cpu;\n+    hvf_put_registers(cpu_state);\n+    cpu_state->hvf_vcpu_dirty = false;\n+}\n+\n+void hvf_cpu_synchronize_post_init(CPUState *cpu_state)\n+{\n+    run_on_cpu(cpu_state, _hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);\n+}\n+ \n+// TODO: ept fault handlig\n+void vmx_clear_int_window_exiting(CPUState *cpu);\n+static bool ept_emulation_fault(uint64_t ept_qual)\n+{\n+\tint read, write;\n+\n+\t/* EPT fault on an instruction fetch doesn't make sense here */\n+\tif (ept_qual & EPT_VIOLATION_INST_FETCH)\n+\t\treturn false;\n+\n+\t/* EPT fault must be a read fault or a write fault */\n+\tread = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;\n+\twrite = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;\n+\tif ((read | write) == 0)\n+\t\treturn false;\n+\n+\t/*\n+\t * The EPT violation must have been caused by accessing a\n+\t * guest-physical address that is a translation of a guest-linear\n+\t * address.\n+\t */\n+\tif ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||\n+\t    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {\n+\t\treturn false;\n+\t}\n+\n+\treturn true;\n+}\n+\n+static void hvf_region_add(MemoryListener * listener,\n+                           MemoryRegionSection * section)\n+{\n+    hvf_set_phys_mem(section, true);\n+}\n+\n+static void hvf_region_del(MemoryListener * listener,\n+                           MemoryRegionSection * section)\n+{\n+    hvf_set_phys_mem(section, false);\n+}\n+\n+static MemoryListener hvf_memory_listener = {\n+    .priority = 10,\n+    .region_add = hvf_region_add,\n+    .region_del = hvf_region_del,\n+};\n+\n+static MemoryListener hvf_io_listener = {\n+    .priority = 10,\n+};\n+\n+void vmx_reset_vcpu(CPUState *cpu) {\n+\n+    wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, 0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, 0);\n+    macvm_set_cr0(cpu->hvf_fd, 0x60000010);\n+\n+    wvmcs(cpu->hvf_fd, VMCS_CR4_MASK, CR4_VMXE_MASK);\n+    wvmcs(cpu->hvf_fd, VMCS_CR4_SHADOW, 0x0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR4, CR4_VMXE_MASK);\n+\n+    // set VMCS guest state fields\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_SELECTOR, 0xf000);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_LIMIT, 0xffff);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_ACCESS_RIGHTS, 0x9b);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_BASE, 0xffff0000);\n+\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_SELECTOR, 0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_LIMIT, 0xffff);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_ACCESS_RIGHTS, 0x93);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_BASE, 0);\n+\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_SELECTOR, 0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_LIMIT, 0xffff);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_ACCESS_RIGHTS, 0x93);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_BASE, 0);\n+\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_SELECTOR, 0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_LIMIT, 0xffff);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_ACCESS_RIGHTS, 0x93);\n+    
wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, 0);\n+\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_SELECTOR, 0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_LIMIT, 0xffff);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_ACCESS_RIGHTS, 0x93);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, 0);\n+\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_SELECTOR, 0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_LIMIT, 0xffff);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_ACCESS_RIGHTS, 0x93);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_BASE, 0);\n+\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_SELECTOR, 0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT, 0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x10000);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE, 0);\n+\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_SELECTOR, 0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_LIMIT, 0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_ACCESS_RIGHTS, 0x83);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_BASE, 0);\n+\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT, 0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE, 0);\n+\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT, 0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE, 0);\n+\n+    //wvmcs(cpu->hvf_fd, VMCS_GUEST_CR2, 0x0);\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, 0x0);\n+\n+    wreg(cpu->hvf_fd, HV_X86_RIP, 0xfff0);\n+    wreg(cpu->hvf_fd, HV_X86_RDX, 0x623);\n+    wreg(cpu->hvf_fd, HV_X86_RFLAGS, 0x2);\n+    wreg(cpu->hvf_fd, HV_X86_RSP, 0x0);\n+    wreg(cpu->hvf_fd, HV_X86_RAX, 0x0);\n+    wreg(cpu->hvf_fd, HV_X86_RBX, 0x0);\n+    wreg(cpu->hvf_fd, HV_X86_RCX, 0x0);\n+    wreg(cpu->hvf_fd, HV_X86_RSI, 0x0);\n+    wreg(cpu->hvf_fd, HV_X86_RDI, 0x0);\n+    wreg(cpu->hvf_fd, HV_X86_RBP, 0x0);\n+\n+    for (int i = 0; i < 8; i++)\n+         wreg(cpu->hvf_fd, HV_X86_R8+i, 0x0);\n+\n+    hv_vm_sync_tsc(0);\n+    cpu->halted = 0;\n+    hv_vcpu_invalidate_tlb(cpu->hvf_fd);\n+    hv_vcpu_flush(cpu->hvf_fd);\n+}\n+\n+void hvf_vcpu_destroy(CPUState* cpu) \n+{\n+    hv_return_t ret = hv_vcpu_destroy((hv_vcpuid_t)cpu->hvf_fd);\n+    assert_hvf_ok(ret);\n+}\n+\n+static void dummy_signal(int sig)\n+{\n+}\n+\n+int hvf_init_vcpu(CPUState * cpu) {\n+\n+    X86CPU *x86cpu;\n+    \n+    // init cpu signals\n+    sigset_t set;\n+    struct sigaction sigact;\n+\n+    memset(&sigact, 0, sizeof(sigact));\n+    sigact.sa_handler = dummy_signal;\n+    sigaction(SIG_IPI, &sigact, NULL);\n+\n+    pthread_sigmask(SIG_BLOCK, NULL, &set);\n+    sigdelset(&set, SIG_IPI);\n+\n+    int r;\n+    init_emu(cpu);\n+    init_decoder(cpu);\n+    init_cpuid(cpu);\n+\n+    cpu->hvf_caps = (struct hvf_vcpu_caps*)g_malloc0(sizeof(struct hvf_vcpu_caps));\n+    cpu->hvf_x86 = (struct hvf_x86_state*)g_malloc0(sizeof(struct hvf_x86_state));\n+\n+    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);\n+    cpu->hvf_vcpu_dirty = 1;\n+    assert_hvf_ok(r);\n+\n+\tif (hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &cpu->hvf_caps->vmx_cap_pinbased))\n+\t\tabort();\n+\tif (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &cpu->hvf_caps->vmx_cap_procbased))\n+\t\tabort();\n+\tif (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cpu->hvf_caps->vmx_cap_procbased2))\n+\t\tabort();\n+\tif (hv_vmx_read_capability(HV_VMX_CAP_ENTRY, &cpu->hvf_caps->vmx_cap_entry))\n+\t\tabort();\n+\n+\t/* set VMCS control fields */\n+    wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS, cap2ctrl(cpu->hvf_caps->vmx_cap_pinbased, 0));\n+    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, cap2ctrl(cpu->hvf_caps->vmx_cap_procbased,\n+                                                   VMCS_PRI_PROC_BASED_CTLS_HLT |\n+          
                                         VMCS_PRI_PROC_BASED_CTLS_MWAIT |\n+                                                   VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |\n+                                                   VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |\n+                                                   VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);\n+\twvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,\n+          cap2ctrl(cpu->hvf_caps->vmx_cap_procbased2,VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));\n+\n+\twvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, cap2ctrl(cpu->hvf_caps->vmx_cap_entry, 0));\n+\twvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */\n+\n+    wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);\n+\n+    vmx_reset_vcpu(cpu);\n+\n+    x86cpu = X86_CPU(cpu);\n+    x86cpu->env.kvm_xsave_buf = qemu_memalign(4096, sizeof(struct hvf_xsave_buf));\n+\n+    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);\n+    //hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);\n+\n+    return 0;\n+}\n+\n+int hvf_enabled() { return !hvf_disabled; }\n+void hvf_disable(int shouldDisable) {\n+    hvf_disabled = shouldDisable;\n+}\n+\n+int hvf_vcpu_exec(CPUState* cpu) {\n+    X86CPU *x86_cpu = X86_CPU(cpu);\n+    CPUX86State *env = &x86_cpu->env;\n+    int ret = 0;\n+    uint64_t rip = 0;\n+\n+    cpu->halted = 0;\n+\n+    if (hvf_process_events(cpu)) {\n+        return EXCP_HLT;\n+    }\n+\n+    do {\n+        if (cpu->hvf_vcpu_dirty) {\n+            hvf_put_registers(cpu);\n+            cpu->hvf_vcpu_dirty = false;\n+        }\n+\n+        cpu->hvf_x86->interruptable =\n+            !(rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &\n+            (VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));\n+\n+        hvf_inject_interrupts(cpu);\n+        vmx_update_tpr(cpu);\n+\n+\n+        qemu_mutex_unlock_iothread();\n+        if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {\n+            qemu_mutex_lock_iothread();\n+            return EXCP_HLT;\n+        }\n+\n+        hv_return_t r  = hv_vcpu_run(cpu->hvf_fd);\n+        assert_hvf_ok(r);\n+\n+        /* handle VMEXIT */\n+        uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);\n+        uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);\n+        uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n+        uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);\n+        rip = rreg(cpu->hvf_fd, HV_X86_RIP);\n+        RFLAGS(cpu) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);\n+        env->eflags = RFLAGS(cpu);\n+\n+        trace_hvf_vm_exit(exit_reason, exit_qual);\n+\n+        qemu_mutex_lock_iothread();\n+\n+        update_apic_tpr(cpu);\n+        current_cpu = cpu;\n+\n+        ret = 0;\n+        switch (exit_reason) {\n+            case EXIT_REASON_HLT: {\n+                macvm_set_rip(cpu, rip + ins_len);\n+                if 
(!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && (EFLAGS(cpu) & IF_MASK))\n+                    && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&\n+                    !(idtvec_info & VMCS_IDT_VEC_VALID)) {\n+                    cpu->halted = 1;\n+                    ret = EXCP_HLT;\n+                }\n+                ret = EXCP_INTERRUPT;\n+                break;\n+            }\n+            case EXIT_REASON_MWAIT: {\n+                ret = EXCP_INTERRUPT;\n+                break;\n+            }\n+                /* Need to check if MMIO or unmmaped fault */\n+            case EXIT_REASON_EPT_FAULT:\n+            {\n+                hvf_slot *slot;\n+                addr_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);\n+                trace_hvf_vm_exit_gpa(gpa);\n+\n+                if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && (exit_qual & EXIT_QUAL_NMIUDTI) != 0)\n+                    vmx_set_nmi_blocking(cpu);\n+\n+                slot = hvf_find_overlap_slot(gpa, gpa);\n+                // mmio\n+                if (ept_emulation_fault(exit_qual) && !slot) {\n+                    struct x86_decode decode;\n+\n+                    load_regs(cpu);\n+                    cpu->hvf_x86->fetch_rip = rip;\n+\n+                    decode_instruction(cpu, &decode);\n+                    exec_instruction(cpu, &decode);\n+                    store_regs(cpu);\n+                    break;\n+                }\n+#ifdef DIRTY_VGA_TRACKING\n+                if (slot) {\n+                    bool read = exit_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;\n+                    bool write = exit_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;\n+                    if (!read && !write)\n+                        break;\n+                    int flags = HV_MEMORY_READ | HV_MEMORY_EXEC;\n+                    if (write) flags |= HV_MEMORY_WRITE;\n+\n+                    pthread_rwlock_wrlock(&mem_lock);\n+                    if (write)\n+                        mark_slot_page_dirty(slot, gpa);\n+                    hv_vm_protect(gpa & ~0xfff, 4096, flags);\n+                    pthread_rwlock_unlock(&mem_lock);\n+                }\n+#endif\n+                break;\n+            }\n+            case EXIT_REASON_INOUT:\n+            {\n+                uint32_t in = (exit_qual & 8) != 0;\n+                uint32_t size =  (exit_qual & 7) + 1;\n+                uint32_t string =  (exit_qual & 16) != 0;\n+                uint32_t port =  exit_qual >> 16;\n+                //uint32_t rep = (exit_qual & 0x20) != 0;\n+\n+#if 1\n+                if (!string && in) {\n+                    uint64_t val = 0;\n+                    load_regs(cpu);\n+                    hvf_handle_io(env, port, &val, 0, size, 1);\n+                    if (size == 1) AL(cpu) = val;\n+                    else if (size == 2) AX(cpu) = val;\n+                    else if (size == 4) RAX(cpu) = (uint32_t)val;\n+                    else VM_PANIC(\"size\");\n+                    RIP(cpu) += ins_len;\n+                    store_regs(cpu);\n+                    break;\n+                } else if (!string && !in) {\n+                    RAX(cpu) = rreg(cpu->hvf_fd, HV_X86_RAX);\n+                    hvf_handle_io(env, port, &RAX(cpu), 1, size, 1);\n+                    macvm_set_rip(cpu, rip + ins_len);\n+                    break;\n+                }\n+#endif\n+                struct x86_decode decode;\n+\n+                load_regs(cpu);\n+                cpu->hvf_x86->fetch_rip = rip;\n+\n+                decode_instruction(cpu, &decode);\n+  
              VM_PANIC_ON(ins_len != decode.len);\n+                exec_instruction(cpu, &decode);\n+                store_regs(cpu);\n+\n+                break;\n+            }\n+            case EXIT_REASON_CPUID: {\n+                uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);\n+                uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);\n+                uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);\n+                uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);\n+\n+                get_cpuid_func(cpu, rax, rcx, &rax, &rbx, &rcx, &rdx);\n+\n+                wreg(cpu->hvf_fd, HV_X86_RAX, rax);\n+                wreg(cpu->hvf_fd, HV_X86_RBX, rbx);\n+                wreg(cpu->hvf_fd, HV_X86_RCX, rcx);\n+                wreg(cpu->hvf_fd, HV_X86_RDX, rdx);\n+\n+                macvm_set_rip(cpu, rip + ins_len);\n+                break;\n+            }\n+            case EXIT_REASON_XSETBV: {\n+                X86CPU *x86_cpu = X86_CPU(cpu);\n+                CPUX86State *env = &x86_cpu->env;\n+                uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);\n+                uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);\n+                uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);\n+\n+                if (ecx) {\n+                    macvm_set_rip(cpu, rip + ins_len);\n+                    break;\n+                }\n+                env->xcr0 = ((uint64_t)edx << 32) | eax;\n+                wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);\n+                macvm_set_rip(cpu, rip + ins_len);\n+                break;\n+            }\n+            case EXIT_REASON_INTR_WINDOW:\n+                vmx_clear_int_window_exiting(cpu);\n+                ret = EXCP_INTERRUPT;\n+                break;\n+            case EXIT_REASON_NMI_WINDOW:\n+                vmx_clear_nmi_window_exiting(cpu);\n+                ret = EXCP_INTERRUPT;\n+                break;\n+            case EXIT_REASON_EXT_INTR:\n+                /* force exit and allow io handling */\n+                ret = EXCP_INTERRUPT;\n+                break;\n+            case EXIT_REASON_RDMSR:\n+            case EXIT_REASON_WRMSR:\n+            {\n+                load_regs(cpu);\n+                if (exit_reason == EXIT_REASON_RDMSR)\n+                    simulate_rdmsr(cpu);\n+                else\n+                    simulate_wrmsr(cpu);\n+                RIP(cpu) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n+                store_regs(cpu);\n+                break;\n+            }\n+            case EXIT_REASON_CR_ACCESS: {\n+                int cr;\n+                int reg;\n+\n+                load_regs(cpu);\n+                cr = exit_qual & 15;\n+                reg = (exit_qual >> 8) & 15;\n+\n+                switch (cr) {\n+                    case 0x0: {\n+                        macvm_set_cr0(cpu->hvf_fd, RRX(cpu, reg));\n+                        break;\n+                    }\n+                    case 4: {\n+                        macvm_set_cr4(cpu->hvf_fd, RRX(cpu, reg));\n+                        break;\n+                    }\n+                    case 8: {\n+                        X86CPU *x86_cpu = X86_CPU(cpu);\n+                        if (exit_qual & 0x10) {\n+                            RRX(cpu, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);\n+                        }\n+                        else {\n+                            int tpr = RRX(cpu, reg);\n+                            cpu_set_apic_tpr(x86_cpu->apic_state, tpr);\n+    
                        ret = EXCP_INTERRUPT;\n+                        }\n+                        break;\n+                    }\n+                    default:\n+                        fprintf(stderr, \"Unrecognized CR %d\\n\", cr);\n+                        abort();\n+                }\n+                RIP(cpu) += ins_len;\n+                store_regs(cpu);\n+                break;\n+            }\n+            case EXIT_REASON_APIC_ACCESS: { // TODO\n+                struct x86_decode decode;\n+\n+                load_regs(cpu);\n+                cpu->hvf_x86->fetch_rip = rip;\n+\n+                decode_instruction(cpu, &decode);\n+                exec_instruction(cpu, &decode);\n+                store_regs(cpu);\n+                break;\n+            }\n+            case EXIT_REASON_TPR: {\n+                ret = 1;\n+                break;\n+            }\n+            case EXIT_REASON_TASK_SWITCH: {\n+                uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);\n+                x68_segment_selector sel = {.sel = exit_qual & 0xffff};\n+                vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,\n+                 vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo & VMCS_INTR_T_MASK);\n+                break;\n+            }\n+            case EXIT_REASON_TRIPLE_FAULT: {\n+                //addr_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);\n+                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);\n+                usleep(1000 * 100);\n+                ret = EXCP_INTERRUPT;\n+                break;\n+            }\n+            case EXIT_REASON_RDPMC:\n+                wreg(cpu->hvf_fd, HV_X86_RAX, 0);\n+                wreg(cpu->hvf_fd, HV_X86_RDX, 0);\n+                macvm_set_rip(cpu, rip + ins_len);\n+                break;\n+            case VMX_REASON_VMCALL:\n+                // TODO: maybe just take this out?\n+                // if (g_hypervisor_iface) {\n+                //     load_regs(cpu);\n+                //     g_hypervisor_iface->hypercall_handler(cpu);\n+                //     RIP(cpu) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n+                //     store_regs(cpu);\n+                // }\n+                break;\n+            default:\n+                fprintf(stderr, \"%llx: unhandled exit %llx\\n\", rip, exit_reason);\n+        }\n+    } while (ret == 0);\n+\n+    return ret;\n+}\n+\n+static bool hvf_allowed;\n+\n+static int hvf_accel_init(MachineState *ms)\n+{\n+    int x;\n+    hv_return_t ret;\n+    HVFState *s;\n+\n+    ret = hv_vm_create(HV_VM_DEFAULT);\n+    assert_hvf_ok(ret);\n+\n+    s = (HVFState *)g_malloc0(sizeof(HVFState));\n+ \n+    s->num_slots = 32;\n+    for (x = 0; x < s->num_slots; ++x) {\n+        s->slots[x].size = 0;\n+        s->slots[x].slot_id = x;\n+    }\n+  \n+    hvf_state = s;\n+    cpu_interrupt_handler = hvf_handle_interrupt;\n+    memory_listener_register(&hvf_memory_listener, &address_space_memory);\n+    memory_listener_register(&hvf_io_listener, &address_space_io);\n+    return 0;\n+}\n+\n+static void hvf_accel_class_init(ObjectClass *oc, void *data)\n+{\n+    AccelClass *ac = ACCEL_CLASS(oc);\n+    ac->name = \"HVF\";\n+    ac->init_machine = hvf_accel_init;\n+    ac->allowed = &hvf_allowed;\n+}\n+\n+static const TypeInfo hvf_accel_type = {\n+    .name = TYPE_HVF_ACCEL,\n+    .parent = TYPE_ACCEL,\n+    .class_init = hvf_accel_class_init,\n+};\n+\n+static void hvf_type_init(void)\n+{\n+    
type_register_static(&hvf_accel_type);\n+}\n+\n+type_init(hvf_type_init);\ndiff --git a/target/i386/hvf-i386.h b/target/i386/hvf-i386.h\nnew file mode 100644\nindex 0000000000..f3f958058a\n--- /dev/null\n+++ b/target/i386/hvf-i386.h\n@@ -0,0 +1,48 @@\n+/*\n+ * QEMU Hypervisor.framework (HVF) support\n+ *\n+ * Copyright 2017 Google Inc\n+ *\n+ * Adapted from target-i386/hax-i386.h:\n+ * Copyright (c) 2011 Intel Corporation\n+ *  Written by:\n+ *  Jiang Yunhong<yunhong.jiang@intel.com>\n+ *\n+ * This work is licensed under the terms of the GNU GPL, version 2 or later.\n+ * See the COPYING file in the top-level directory.\n+ *\n+ */\n+\n+#ifndef _HVF_I386_H\n+#define _HVF_I386_H\n+\n+#include \"sysemu/hvf.h\"\n+#include \"cpu.h\"\n+#include \"hvf-utils/x86.h\"\n+\n+#define HVF_MAX_VCPU 0x10\n+#define MAX_VM_ID 0x40\n+#define MAX_VCPU_ID 0x40\n+\n+extern struct hvf_state hvf_global;\n+\n+struct hvf_vm {\n+    int id;\n+    struct hvf_vcpu_state *vcpus[HVF_MAX_VCPU];\n+};\n+\n+struct hvf_state {\n+    uint32_t version;\n+    struct hvf_vm *vm;\n+    uint64_t mem_quota;\n+};\n+\n+#ifdef NEED_CPU_H\n+/* Functions exported to host specific mode */\n+\n+/* Host specific functions */\n+int hvf_inject_interrupt(CPUArchState * env, int vector);\n+int hvf_vcpu_run(struct hvf_vcpu_state *vcpu);\n+#endif\n+\n+#endif\ndiff --git a/target/i386/hvf-utils/Makefile.objs b/target/i386/hvf-utils/Makefile.objs\nnew file mode 100644\nindex 0000000000..7df219ad9c\n--- /dev/null\n+++ b/target/i386/hvf-utils/Makefile.objs\n@@ -0,0 +1 @@\n+obj-y += x86.o x86_cpuid.o x86_decode.o x86_descr.o x86_emu.o x86_flags.o x86_mmu.o x86hvf.o\ndiff --git a/target/i386/hvf-utils/README.md b/target/i386/hvf-utils/README.md\nnew file mode 100644\nindex 0000000000..0d27a0d52b\n--- /dev/null\n+++ b/target/i386/hvf-utils/README.md\n@@ -0,0 +1,7 @@\n+# OS X Hypervisor.framework support in QEMU\n+\n+These sources (and ../hvf-all.c) are adapted from Veertu Inc's vdhh (Veertu Desktop Hosted Hypervisor) (last known location: https://github.com/veertuinc/vdhh) with some minor changes, the most significant of which were:\n+\n+1. Adapt to our current QEMU's `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, kvm_xsave_buf) due to historical differences + QEMU needing to handle more emulation targets.\n+2. Removal of `apic_page` and hyperv-related functionality.\n+3. More relaxed use of `qemu_mutex_lock_iothread`.\ndiff --git a/target/i386/hvf-utils/vmcs.h b/target/i386/hvf-utils/vmcs.h\nnew file mode 100644\nindex 0000000000..6f7ccb361a\n--- /dev/null\n+++ b/target/i386/hvf-utils/vmcs.h\n@@ -0,0 +1,368 @@\n+/*-\n+ * Copyright (c) 2011 NetApp, Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ * 1. Redistributions of source code must retain the above copyright\n+ *    notice, this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ *    notice, this list of conditions and the following disclaimer in the\n+ *    documentation and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND\n+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED.  
IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE\n+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n+ * SUCH DAMAGE.\n+ *\n+ * $FreeBSD$\n+ */\n+\n+#ifndef _VMCS_H_\n+#define\t_VMCS_H_\n+\n+#include <Hypervisor/hv.h>\n+#include <Hypervisor/hv_vmx.h>\n+\n+#define\tVMCS_INITIAL\t\t\t0xffffffffffffffff\n+\n+#define\tVMCS_IDENT(encoding)\t\t((encoding) | 0x80000000)\n+/*\n+ * VMCS field encodings from Appendix H, Intel Architecture Manual Vol3B.\n+ */\n+#define\tVMCS_INVALID_ENCODING\t\t0xffffffff\n+\n+/* 16-bit control fields */\n+#define\tVMCS_VPID\t\t\t0x00000000\n+#define\tVMCS_PIR_VECTOR\t\t\t0x00000002\n+\n+/* 16-bit guest-state fields */\n+#define\tVMCS_GUEST_ES_SELECTOR\t\t0x00000800\n+#define\tVMCS_GUEST_CS_SELECTOR\t\t0x00000802\n+#define\tVMCS_GUEST_SS_SELECTOR\t\t0x00000804\n+#define\tVMCS_GUEST_DS_SELECTOR\t\t0x00000806\n+#define\tVMCS_GUEST_FS_SELECTOR\t\t0x00000808\n+#define\tVMCS_GUEST_GS_SELECTOR\t\t0x0000080A\n+#define\tVMCS_GUEST_LDTR_SELECTOR\t0x0000080C\n+#define\tVMCS_GUEST_TR_SELECTOR\t\t0x0000080E\n+#define\tVMCS_GUEST_INTR_STATUS\t\t0x00000810\n+\n+/* 16-bit host-state fields */\n+#define\tVMCS_HOST_ES_SELECTOR\t\t0x00000C00\n+#define\tVMCS_HOST_CS_SELECTOR\t\t0x00000C02\n+#define\tVMCS_HOST_SS_SELECTOR\t\t0x00000C04\n+#define\tVMCS_HOST_DS_SELECTOR\t\t0x00000C06\n+#define\tVMCS_HOST_FS_SELECTOR\t\t0x00000C08\n+#define\tVMCS_HOST_GS_SELECTOR\t\t0x00000C0A\n+#define\tVMCS_HOST_TR_SELECTOR\t\t0x00000C0C\n+\n+/* 64-bit control fields */\n+#define\tVMCS_IO_BITMAP_A\t\t0x00002000\n+#define\tVMCS_IO_BITMAP_B\t\t0x00002002\n+#define\tVMCS_MSR_BITMAP\t\t\t0x00002004\n+#define\tVMCS_EXIT_MSR_STORE\t\t0x00002006\n+#define\tVMCS_EXIT_MSR_LOAD\t\t0x00002008\n+#define\tVMCS_ENTRY_MSR_LOAD\t\t0x0000200A\n+#define\tVMCS_EXECUTIVE_VMCS\t\t0x0000200C\n+#define\tVMCS_TSC_OFFSET\t\t\t0x00002010\n+#define\tVMCS_VIRTUAL_APIC\t\t0x00002012\n+#define\tVMCS_APIC_ACCESS\t\t0x00002014\n+#define\tVMCS_PIR_DESC\t\t\t0x00002016\n+#define\tVMCS_EPTP\t\t\t0x0000201A\n+#define\tVMCS_EOI_EXIT0\t\t\t0x0000201C\n+#define\tVMCS_EOI_EXIT1\t\t\t0x0000201E\n+#define\tVMCS_EOI_EXIT2\t\t\t0x00002020\n+#define\tVMCS_EOI_EXIT3\t\t\t0x00002022\n+#define\tVMCS_EOI_EXIT(vector)\t\t(VMCS_EOI_EXIT0 + ((vector) / 64) * 2)\n+\n+/* 64-bit read-only fields */\n+#define\tVMCS_GUEST_PHYSICAL_ADDRESS\t0x00002400\n+\n+/* 64-bit guest-state fields */\n+#define\tVMCS_LINK_POINTER\t\t0x00002800\n+#define\tVMCS_GUEST_IA32_DEBUGCTL\t0x00002802\n+#define\tVMCS_GUEST_IA32_PAT\t\t0x00002804\n+#define\tVMCS_GUEST_IA32_EFER\t\t0x00002806\n+#define\tVMCS_GUEST_IA32_PERF_GLOBAL_CTRL 0x00002808\n+#define\tVMCS_GUEST_PDPTE0\t\t0x0000280A\n+#define\tVMCS_GUEST_PDPTE1\t\t0x0000280C\n+#define\tVMCS_GUEST_PDPTE2\t\t0x0000280E\n+#define\tVMCS_GUEST_PDPTE3\t\t0x00002810\n+\n+/* 64-bit host-state fields */\n+#define\tVMCS_HOST_IA32_PAT\t\t0x00002C00\n+#define\tVMCS_HOST_IA32_EFER\t\t0x00002C02\n+#define\tVMCS_HOST_IA32_PERF_GLOBAL_CTRL\t0x00002C04\n+\n+/* 32-bit control fields 
*/\n+#define\tVMCS_PIN_BASED_CTLS\t\t0x00004000\n+#define\tVMCS_PRI_PROC_BASED_CTLS\t0x00004002\n+#define\tVMCS_EXCEPTION_BITMAP\t\t0x00004004\n+#define\tVMCS_PF_ERROR_MASK\t\t0x00004006\n+#define\tVMCS_PF_ERROR_MATCH\t\t0x00004008\n+#define\tVMCS_CR3_TARGET_COUNT\t\t0x0000400A\n+#define\tVMCS_EXIT_CTLS\t\t\t0x0000400C\n+#define\tVMCS_EXIT_MSR_STORE_COUNT\t0x0000400E\n+#define\tVMCS_EXIT_MSR_LOAD_COUNT\t0x00004010\n+#define\tVMCS_ENTRY_CTLS\t\t\t0x00004012\n+#define\tVMCS_ENTRY_MSR_LOAD_COUNT\t0x00004014\n+#define\tVMCS_ENTRY_INTR_INFO\t\t0x00004016\n+#define\tVMCS_ENTRY_EXCEPTION_ERROR\t0x00004018\n+#define\tVMCS_ENTRY_INST_LENGTH\t\t0x0000401A\n+#define\tVMCS_TPR_THRESHOLD\t\t0x0000401C\n+#define\tVMCS_SEC_PROC_BASED_CTLS\t0x0000401E\n+#define\tVMCS_PLE_GAP\t\t\t0x00004020\n+#define\tVMCS_PLE_WINDOW\t\t\t0x00004022\n+\n+/* 32-bit read-only data fields */\n+#define\tVMCS_INSTRUCTION_ERROR\t\t0x00004400\n+#define\tVMCS_EXIT_REASON\t\t0x00004402\n+#define\tVMCS_EXIT_INTR_INFO\t\t0x00004404\n+#define\tVMCS_EXIT_INTR_ERRCODE\t\t0x00004406\n+#define\tVMCS_IDT_VECTORING_INFO\t\t0x00004408\n+#define\tVMCS_IDT_VECTORING_ERROR\t0x0000440A\n+#define\tVMCS_EXIT_INSTRUCTION_LENGTH\t0x0000440C\n+#define\tVMCS_EXIT_INSTRUCTION_INFO\t0x0000440E\n+\n+/* 32-bit guest-state fields */\n+#define\tVMCS_GUEST_ES_LIMIT\t\t0x00004800\n+#define\tVMCS_GUEST_CS_LIMIT\t\t0x00004802\n+#define\tVMCS_GUEST_SS_LIMIT\t\t0x00004804\n+#define\tVMCS_GUEST_DS_LIMIT\t\t0x00004806\n+#define\tVMCS_GUEST_FS_LIMIT\t\t0x00004808\n+#define\tVMCS_GUEST_GS_LIMIT\t\t0x0000480A\n+#define\tVMCS_GUEST_LDTR_LIMIT\t\t0x0000480C\n+#define\tVMCS_GUEST_TR_LIMIT\t\t0x0000480E\n+#define\tVMCS_GUEST_GDTR_LIMIT\t\t0x00004810\n+#define\tVMCS_GUEST_IDTR_LIMIT\t\t0x00004812\n+#define\tVMCS_GUEST_ES_ACCESS_RIGHTS\t0x00004814\n+#define\tVMCS_GUEST_CS_ACCESS_RIGHTS\t0x00004816\n+#define\tVMCS_GUEST_SS_ACCESS_RIGHTS\t0x00004818\n+#define\tVMCS_GUEST_DS_ACCESS_RIGHTS\t0x0000481A\n+#define\tVMCS_GUEST_FS_ACCESS_RIGHTS\t0x0000481C\n+#define\tVMCS_GUEST_GS_ACCESS_RIGHTS\t0x0000481E\n+#define\tVMCS_GUEST_LDTR_ACCESS_RIGHTS\t0x00004820\n+#define\tVMCS_GUEST_TR_ACCESS_RIGHTS\t0x00004822\n+#define\tVMCS_GUEST_INTERRUPTIBILITY\t0x00004824\n+#define\tVMCS_GUEST_ACTIVITY\t\t0x00004826\n+#define VMCS_GUEST_SMBASE\t\t0x00004828\n+#define\tVMCS_GUEST_IA32_SYSENTER_CS\t0x0000482A\n+#define\tVMCS_PREEMPTION_TIMER_VALUE\t0x0000482E\n+\n+/* 32-bit host state fields */\n+#define\tVMCS_HOST_IA32_SYSENTER_CS\t0x00004C00\n+\n+/* Natural Width control fields */\n+#define\tVMCS_CR0_MASK\t\t\t0x00006000\n+#define\tVMCS_CR4_MASK\t\t\t0x00006002\n+#define\tVMCS_CR0_SHADOW\t\t\t0x00006004\n+#define\tVMCS_CR4_SHADOW\t\t\t0x00006006\n+#define\tVMCS_CR3_TARGET0\t\t0x00006008\n+#define\tVMCS_CR3_TARGET1\t\t0x0000600A\n+#define\tVMCS_CR3_TARGET2\t\t0x0000600C\n+#define\tVMCS_CR3_TARGET3\t\t0x0000600E\n+\n+/* Natural Width read-only fields */\n+#define\tVMCS_EXIT_QUALIFICATION\t\t0x00006400\n+#define\tVMCS_IO_RCX\t\t\t0x00006402\n+#define\tVMCS_IO_RSI\t\t\t0x00006404\n+#define\tVMCS_IO_RDI\t\t\t0x00006406\n+#define\tVMCS_IO_RIP\t\t\t0x00006408\n+#define\tVMCS_GUEST_LINEAR_ADDRESS\t0x0000640A\n+\n+/* Natural Width guest-state fields 
*/\n+#define\tVMCS_GUEST_CR0\t\t\t0x00006800\n+#define\tVMCS_GUEST_CR3\t\t\t0x00006802\n+#define\tVMCS_GUEST_CR4\t\t\t0x00006804\n+#define\tVMCS_GUEST_ES_BASE\t\t0x00006806\n+#define\tVMCS_GUEST_CS_BASE\t\t0x00006808\n+#define\tVMCS_GUEST_SS_BASE\t\t0x0000680A\n+#define\tVMCS_GUEST_DS_BASE\t\t0x0000680C\n+#define\tVMCS_GUEST_FS_BASE\t\t0x0000680E\n+#define\tVMCS_GUEST_GS_BASE\t\t0x00006810\n+#define\tVMCS_GUEST_LDTR_BASE\t\t0x00006812\n+#define\tVMCS_GUEST_TR_BASE\t\t0x00006814\n+#define\tVMCS_GUEST_GDTR_BASE\t\t0x00006816\n+#define\tVMCS_GUEST_IDTR_BASE\t\t0x00006818\n+#define\tVMCS_GUEST_DR7\t\t\t0x0000681A\n+#define\tVMCS_GUEST_RSP\t\t\t0x0000681C\n+#define\tVMCS_GUEST_RIP\t\t\t0x0000681E\n+#define\tVMCS_GUEST_RFLAGS\t\t0x00006820\n+#define\tVMCS_GUEST_PENDING_DBG_EXCEPTIONS 0x00006822\n+#define\tVMCS_GUEST_IA32_SYSENTER_ESP\t0x00006824\n+#define\tVMCS_GUEST_IA32_SYSENTER_EIP\t0x00006826\n+\n+/* Natural Width host-state fields */\n+#define\tVMCS_HOST_CR0\t\t\t0x00006C00\n+#define\tVMCS_HOST_CR3\t\t\t0x00006C02\n+#define\tVMCS_HOST_CR4\t\t\t0x00006C04\n+#define\tVMCS_HOST_FS_BASE\t\t0x00006C06\n+#define\tVMCS_HOST_GS_BASE\t\t0x00006C08\n+#define\tVMCS_HOST_TR_BASE\t\t0x00006C0A\n+#define\tVMCS_HOST_GDTR_BASE\t\t0x00006C0C\n+#define\tVMCS_HOST_IDTR_BASE\t\t0x00006C0E\n+#define\tVMCS_HOST_IA32_SYSENTER_ESP\t0x00006C10\n+#define\tVMCS_HOST_IA32_SYSENTER_EIP\t0x00006C12\n+#define\tVMCS_HOST_RSP\t\t\t0x00006C14\n+#define\tVMCS_HOST_RIP\t\t\t0x00006c16\n+\n+/*\n+ * VM instruction error numbers\n+ */\n+#define\tVMRESUME_WITH_NON_LAUNCHED_VMCS\t5\n+\n+/*\n+ * VMCS exit reasons\n+ */\n+#define EXIT_REASON_EXCEPTION\t\t0\n+#define EXIT_REASON_EXT_INTR\t\t1\n+#define EXIT_REASON_TRIPLE_FAULT\t2\n+#define EXIT_REASON_INIT\t\t3\n+#define EXIT_REASON_SIPI\t\t4\n+#define EXIT_REASON_IO_SMI\t\t5\n+#define EXIT_REASON_SMI\t\t\t6\n+#define EXIT_REASON_INTR_WINDOW\t\t7\n+#define EXIT_REASON_NMI_WINDOW\t\t8\n+#define EXIT_REASON_TASK_SWITCH\t\t9\n+#define EXIT_REASON_CPUID\t\t10\n+#define EXIT_REASON_GETSEC\t\t11\n+#define EXIT_REASON_HLT\t\t\t12\n+#define EXIT_REASON_INVD\t\t13\n+#define EXIT_REASON_INVLPG\t\t14\n+#define EXIT_REASON_RDPMC\t\t15\n+#define EXIT_REASON_RDTSC\t\t16\n+#define EXIT_REASON_RSM\t\t\t17\n+#define EXIT_REASON_VMCALL\t\t18\n+#define EXIT_REASON_VMCLEAR\t\t19\n+#define EXIT_REASON_VMLAUNCH\t\t20\n+#define EXIT_REASON_VMPTRLD\t\t21\n+#define EXIT_REASON_VMPTRST\t\t22\n+#define EXIT_REASON_VMREAD\t\t23\n+#define EXIT_REASON_VMRESUME\t\t24\n+#define EXIT_REASON_VMWRITE\t\t25\n+#define EXIT_REASON_VMXOFF\t\t26\n+#define EXIT_REASON_VMXON\t\t27\n+#define EXIT_REASON_CR_ACCESS\t\t28\n+#define EXIT_REASON_DR_ACCESS\t\t29\n+#define EXIT_REASON_INOUT\t\t30\n+#define EXIT_REASON_RDMSR\t\t31\n+#define EXIT_REASON_WRMSR\t\t32\n+#define EXIT_REASON_INVAL_VMCS\t\t33\n+#define EXIT_REASON_INVAL_MSR\t\t34\n+#define EXIT_REASON_MWAIT\t\t36\n+#define EXIT_REASON_MTF\t\t\t37\n+#define EXIT_REASON_MONITOR\t\t39\n+#define EXIT_REASON_PAUSE\t\t40\n+#define EXIT_REASON_MCE_DURING_ENTRY\t41\n+#define EXIT_REASON_TPR\t\t\t43\n+#define EXIT_REASON_APIC_ACCESS\t\t44\n+#define\tEXIT_REASON_VIRTUALIZED_EOI\t45\n+#define EXIT_REASON_GDTR_IDTR\t\t46\n+#define EXIT_REASON_LDTR_TR\t\t47\n+#define EXIT_REASON_EPT_FAULT\t\t48\n+#define EXIT_REASON_EPT_MISCONFIG\t49\n+#define EXIT_REASON_INVEPT\t\t50\n+#define EXIT_REASON_RDTSCP\t\t51\n+#define EXIT_REASON_VMX_PREEMPT\t\t52\n+#define EXIT_REASON_INVVPID\t\t53\n+#define EXIT_REASON_WBINVD\t\t54\n+#define 
EXIT_REASON_XSETBV\t\t55\n+#define\tEXIT_REASON_APIC_WRITE\t\t56\n+\n+/*\n+ * NMI unblocking due to IRET.\n+ *\n+ * Applies to VM-exits due to hardware exception or EPT fault.\n+ */\n+#define\tEXIT_QUAL_NMIUDTI\t(1 << 12)\n+/*\n+ * VMCS interrupt information fields\n+ */\n+#define\tVMCS_INTR_VALID\t\t(1U << 31)\n+#define\tVMCS_INTR_T_MASK\t0x700\t\t/* Interruption-info type */\n+#define\tVMCS_INTR_T_HWINTR\t(0 << 8)\n+#define\tVMCS_INTR_T_NMI\t\t(2 << 8)\n+#define\tVMCS_INTR_T_HWEXCEPTION\t(3 << 8)\n+#define\tVMCS_INTR_T_SWINTR\t(4 << 8)\n+#define\tVMCS_INTR_T_PRIV_SWEXCEPTION (5 << 8)\n+#define\tVMCS_INTR_T_SWEXCEPTION\t(6 << 8)\n+#define\tVMCS_INTR_DEL_ERRCODE\t(1 << 11)\n+\n+/*\n+ * VMCS IDT-Vectoring information fields\n+ */\n+#define\tVMCS_IDT_VEC_VALID\t\t(1U << 31)\n+#define\tVMCS_IDT_VEC_TYPE\t\t0x700\n+#define\tVMCS_IDT_VEC_ERRCODE_VALID\t(1U << 11)\n+#define\tVMCS_IDT_VEC_HWINTR\t\t(0 << 8)\n+#define\tVMCS_IDT_VEC_NMI\t\t(2 << 8)\n+#define\tVMCS_IDT_VEC_HWEXCEPTION\t(3 << 8)\n+#define\tVMCS_IDT_VEC_SWINTR\t\t(4 << 8)\n+\n+/*\n+ * VMCS Guest interruptibility field\n+ */\n+#define\tVMCS_INTERRUPTIBILITY_STI_BLOCKING\t(1 << 0)\n+#define\tVMCS_INTERRUPTIBILITY_MOVSS_BLOCKING\t(1 << 1)\n+#define\tVMCS_INTERRUPTIBILITY_SMI_BLOCKING\t(1 << 2)\n+#define\tVMCS_INTERRUPTIBILITY_NMI_BLOCKING\t(1 << 3)\n+\n+/*\n+ * Exit qualification for EXIT_REASON_INVAL_VMCS\n+ */\n+#define\tEXIT_QUAL_NMI_WHILE_STI_BLOCKING\t3\n+\n+/*\n+ * Exit qualification for EPT violation\n+ */\n+#define\tEPT_VIOLATION_DATA_READ\t\t(1UL << 0)\n+#define\tEPT_VIOLATION_DATA_WRITE\t(1UL << 1)\n+#define\tEPT_VIOLATION_INST_FETCH\t(1UL << 2)\n+#define\tEPT_VIOLATION_GPA_READABLE\t(1UL << 3)\n+#define\tEPT_VIOLATION_GPA_WRITEABLE\t(1UL << 4)\n+#define\tEPT_VIOLATION_GPA_EXECUTABLE\t(1UL << 5)\n+#define\tEPT_VIOLATION_GLA_VALID\t\t(1UL << 7)\n+#define\tEPT_VIOLATION_XLAT_VALID\t(1UL << 8)\n+\n+/*\n+ * Exit qualification for APIC-access VM exit\n+ */\n+#define\tAPIC_ACCESS_OFFSET(qual)\t((qual) & 0xFFF)\n+#define\tAPIC_ACCESS_TYPE(qual)\t\t(((qual) >> 12) & 0xF)\n+\n+/*\n+ * Exit qualification for APIC-write VM exit\n+ */\n+#define\tAPIC_WRITE_OFFSET(qual)\t\t((qual) & 0xFFF)\n+\n+#define VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING    (1 << 2)\n+#define VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET    (1 << 3)\n+#define VMCS_PRI_PROC_BASED_CTLS_HLT           (1 << 7)\n+#define VMCS_PRI_PROC_BASED_CTLS_MWAIT         (1 << 10)\n+#define VMCS_PRI_PROC_BASED_CTLS_TSC           (1 << 12)\n+#define VMCS_PRI_PROC_BASED_CTLS_CR8_LOAD      (1 << 19)\n+#define VMCS_PRI_PROC_BASED_CTLS_CR8_STORE     (1 << 20)\n+#define VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW    (1 << 21)\n+#define VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING    (1 << 22)\n+#define VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL   (1 << 31)\n+\n+#define VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES (1 << 0)\n+#define VMCS_PRI_PROC_BASED2_CTLS_X2APIC        (1 << 4)\n+\n+enum task_switch_reason {\n+\tTSR_CALL,\n+\tTSR_IRET,\n+\tTSR_JMP,\n+\tTSR_IDT_GATE,\t/* task gate in IDT */\n+};\n+\n+#endif\n
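The interruption-information words used for VM entry (VMCS_ENTRY_INTR_INFO) and VM exit (VMCS_EXIT_INTR_INFO) share the layout encoded by the masks above: bit 31 is the valid bit and bits 10:8 are the interruption type. As a quick orientation aid, a handler built on these masks might classify an NMI exit as in this sketch, assuming intr_info was fetched with the rvmcs() helper introduced in vmx.h below:

    static bool is_nmi_exit(uint64_t intr_info)
    {
        /* the type bits only mean anything when the valid bit is set */
        if (!(intr_info & VMCS_INTR_VALID)) {
            return false;
        }
        return (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI;
    }

diff --git a/target/i386/hvf-utils/vmx.h b/target/i386/hvf-utils/vmx.h\nnew file mode 100644\nindex 0000000000..8a080e6777\n--- /dev/null\n+++ b/target/i386/hvf-utils/vmx.h\n@@ -0,0 +1,200 @@\n+/*\n+ * Copyright (C) 2016 Veertu Inc,\n+ * Copyright (C) 2017 Google Inc,\n+ * Based on Veertu vddh/vmm/vmx.h\n+ *\n+ * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.\n+ *\n+ * This program is free software; you can redistribute it and/or\n+ * modify it under 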
the terms of the GNU General Public License as\n+ * published by the Free Software Foundation; either version 2 or\n+ * (at your option) version 3 of the License.\n+ *\n+ * This program is distributed in the hope that it will be useful,\n+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\n+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n+ * GNU General Public License for more details.\n+ *\n+ * You should have received a copy of the GNU General Public License along\n+ * with this program; if not, see <http://www.gnu.org/licenses/>.\n+ */\n+\n+#ifndef VMX_H\n+#define VMX_H\n+\n+#include <stdint.h>\n+#include <Hypervisor/hv.h>\n+#include <Hypervisor/hv_vmx.h>\n+#include \"vmcs.h\"\n+#include \"cpu.h\"\n+#include \"x86.h\"\n+\n+#include \"exec/address-spaces.h\"\n+\n+static uint64_t inline rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)\n+{\n+\tuint64_t v;\n+\n+\tif (hv_vcpu_read_register(vcpu, reg, &v)) {\n+\t\tabort();\n+\t}\n+\n+\treturn v;\n+}\n+\n+/* write GPR */\n+static void inline wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)\n+{\n+\tif (hv_vcpu_write_register(vcpu, reg, v)) {\n+\t\tabort();\n+\t}\n+}\n+\n+/* read VMCS field */\n+static uint64_t inline rvmcs(hv_vcpuid_t vcpu, uint32_t field)\n+{\n+\tuint64_t v;\n+\n+\thv_vmx_vcpu_read_vmcs(vcpu, field, &v);\n+\n+\treturn v;\n+}\n+\n+/* write VMCS field */\n+static void inline wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)\n+{\n+\thv_vmx_vcpu_write_vmcs(vcpu, field, v);\n+}\n+\n+/* desired control word constrained by hardware/hypervisor capabilities */\n+static uint64_t inline cap2ctrl(uint64_t cap, uint64_t ctrl)\n+{\n+\treturn (ctrl | (cap & 0xffffffff)) & (cap >> 32);\n+}\n+\n+#define VM_ENTRY_GUEST_LMA (1LL << 9)\n+\n+#define AR_TYPE_ACCESSES_MASK 1\n+#define AR_TYPE_READABLE_MASK (1 << 1)\n+#define AR_TYPE_WRITEABLE_MASK (1 << 2)\n+#define AR_TYPE_CODE_MASK (1 << 3)\n+#define AR_TYPE_MASK 0x0f\n+#define AR_TYPE_BUSY_64_TSS 11\n+#define AR_TYPE_BUSY_32_TSS 11\n+#define AR_TYPE_BUSY_16_TSS 3\n+#define AR_TYPE_LDT 2\n+\n+static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)\n+{\n+    uint64_t entry_ctls;\n+\n+    efer |= EFER_LMA;\n+    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);\n+    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);\n+    wvmcs(vcpu, VMCS_ENTRY_CTLS, rvmcs(vcpu, VMCS_ENTRY_CTLS) | VM_ENTRY_GUEST_LMA);\n+\n+    uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);\n+    if ((efer & EFER_LME) && (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {\n+        wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS, (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);\n+    }\n+}\n+\n+static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)\n+{\n+    uint64_t entry_ctls;\n+\n+    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);\n+    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);\n+\n+    efer &= ~EFER_LMA;\n+    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);\n+}\n+\n+static void inline macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)\n+{\n+    int i;\n+    uint64_t pdpte[4] = {0, 0, 0, 0};\n+    uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);\n+    uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);\n+\n+    if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) && !(efer & EFER_LME))\n+        address_space_rw(&address_space_memory, rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,\n+                         MEMTXATTRS_UNSPECIFIED,\n+                         (uint8_t *)pdpte, 32, 0);\n+\n+    for (i = 0; i < 4; i++)\n+        wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i 
* 2, pdpte[i]);\n+\n+    wvmcs(vcpu, VMCS_CR0_MASK, CR0_CD | CR0_NE | CR0_PG);\n+    wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);\n+\n+    cr0 &= ~CR0_CD;\n+    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);\n+\n+    if (efer & EFER_LME) {\n+        if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG))\n+            enter_long_mode(vcpu, cr0, efer);\n+        if (/*(old_cr0 & CR0_PG) &&*/ !(cr0 & CR0_PG))\n+            exit_long_mode(vcpu, cr0, efer);\n+    }\n+\n+    hv_vcpu_invalidate_tlb(vcpu);\n+    hv_vcpu_flush(vcpu);\n+}\n+\n+static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)\n+{\n+    uint64_t guest_cr4 = cr4 | CR4_VMXE;\n+\n+    wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);\n+    wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);\n+\n+    hv_vcpu_invalidate_tlb(vcpu);\n+    hv_vcpu_flush(vcpu);\n+}\n+\n+static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)\n+{\n+    uint64_t val;\n+\n+    /* BUG: this should take instruction overlap into account */\n+    wreg(cpu->hvf_fd, HV_X86_RIP, rip);\n+\n+    /* after moving rip forward, clear any STI/MOV-SS interruptibility blocking */\n+    val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);\n+    if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING))\n+        wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,\n+              val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));\n+}\n+\n+static inline void vmx_clear_nmi_blocking(CPUState *cpu)\n+{\n+    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);\n+    gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);\n+}\n+\n+static inline void vmx_set_nmi_blocking(CPUState *cpu)\n+{\n+    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);\n+    gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;\n+    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);\n+}\n+\n+static inline void vmx_set_nmi_window_exiting(CPUState *cpu)\n+{\n+    uint64_t val;\n+    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);\n+    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val | VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);\n+}\n+\n+static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)\n+{\n+    uint64_t val;\n+    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);\n+    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val & ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);\n+}\n+\n+#endif\n
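One usage note on this header: the VMCS control fields are meant to be programmed through cap2ctrl() above, which forces the must-be-one settings (low 32 bits of a VMX capability MSR) and strips anything outside the allowed-one bits (high 32 bits). A sketch of the pattern, assuming the capability value is fetched with Hypervisor.framework's hv_vmx_read_capability():

    uint64_t cap;
    if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &cap)) {
        abort();
    }
    /* request no optional pin-based controls; cap2ctrl() still turns on
     * whatever bits the hardware/hypervisor requires */
    wvmcs(vcpu, VMCS_PIN_BASED_CTLS, cap2ctrl(cap, 0));

diff --git a/target/i386/hvf-utils/x86.c b/target/i386/hvf-utils/x86.c\nnew file mode 100644\nindex 0000000000..e3db2c9c8b\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86.c\n@@ -0,0 +1,174 @@\n+/*\n+ * Copyright (C) 2016 Veertu Inc,\n+ * Copyright (C) 2017 Google Inc,\n+ *\n+ * This program is free software; you can redistribute it and/or\n+ * modify it under the terms of the GNU General Public License as\n+ * published by the Free Software Foundation; either version 2 or\n+ * (at your option) version 3 of the License.\n+ *\n+ * This program is distributed in the hope that it will be useful,\n+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\n+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  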
See the\n+ * GNU General Public License for more details.\n+ *\n+ * You should have received a copy of the GNU General Public License along\n+ * with this program; if not, see <http://www.gnu.org/licenses/>.\n+ */\n+\n+#include \"qemu/osdep.h\"\n+\n+#include \"qemu-common.h\"\n+#include \"x86_decode.h\"\n+#include \"x86_emu.h\"\n+#include \"vmcs.h\"\n+#include \"vmx.h\"\n+#include \"x86_mmu.h\"\n+#include \"x86_descr.h\"\n+\n+static uint32_t x86_segment_access_rights(struct x86_segment_descriptor *var)\n+{\n+    uint32_t ar;\n+\n+    if (!var->p) {\n+        ar = 1 << 16;\n+        return ar;\n+    }\n+\n+    ar = var->type & 15;\n+    ar |= (var->s & 1) << 4;\n+    ar |= (var->dpl & 3) << 5;\n+    ar |= (var->p & 1) << 7;\n+    ar |= (var->avl & 1) << 12;\n+    ar |= (var->l & 1) << 13;\n+    ar |= (var->db & 1) << 14;\n+    ar |= (var->g & 1) << 15;\n+    return ar;\n+}\n+\n+bool x86_read_segment_descriptor(struct CPUState *cpu, struct x86_segment_descriptor *desc, x68_segment_selector sel)\n+{\n+    addr_t base;\n+    uint32_t limit;\n+\n+    ZERO_INIT(*desc);\n+    // valid gdt descriptors start from index 1\n+    if (!sel.index && GDT_SEL == sel.ti)\n+        return false;\n+\n+    if (GDT_SEL == sel.ti) {\n+        base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);\n+        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);\n+    } else {\n+        base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);\n+        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);\n+    }\n+\n+    if (sel.index * 8 >= limit)\n+        return false;\n+\n+    vmx_read_mem(cpu, desc, base + sel.index * 8, sizeof(*desc));\n+    return true;\n+}\n+\n+bool x86_write_segment_descriptor(struct CPUState *cpu, struct x86_segment_descriptor *desc, x68_segment_selector sel)\n+{\n+    addr_t base;\n+    uint32_t limit;\n+    \n+    if (GDT_SEL == sel.ti) {\n+        base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);\n+        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);\n+    } else {\n+        base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);\n+        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);\n+    }\n+    \n+    if (sel.index * 8 >= limit) {\n+        printf(\"%s: gdt limit\\n\", __FUNCTION__);\n+        return false;\n+    }\n+    vmx_write_mem(cpu, base + sel.index * 8, desc, sizeof(*desc));\n+    return true;\n+}\n+\n+bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc, int gate)\n+{\n+    addr_t base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE);\n+    uint32_t limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT);\n+\n+    ZERO_INIT(*idt_desc);\n+    if (gate * 8 >= limit) {\n+        printf(\"%s: idt limit\\n\", __FUNCTION__);\n+        return false;\n+    }\n+\n+    vmx_read_mem(cpu, idt_desc, base + gate * 8, sizeof(*idt_desc));\n+    return true;\n+}\n+\n+bool x86_is_protected(struct CPUState *cpu)\n+{\n+    uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);\n+    return cr0 & CR0_PE;\n+}\n+\n+bool x86_is_real(struct CPUState *cpu)\n+{\n+    return !x86_is_protected(cpu);\n+}\n+\n+bool x86_is_v8086(struct CPUState *cpu)\n+{\n+    return (x86_is_protected(cpu) && (RFLAGS(cpu) & RFLAGS_VM));\n+}\n+\n+bool x86_is_long_mode(struct CPUState *cpu)\n+{\n+    return rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER) & EFER_LMA;\n+}\n+\n+bool x86_is_long64_mode(struct CPUState *cpu)\n+{\n+    struct vmx_segment desc;\n+    vmx_read_segment_descriptor(cpu, &desc, REG_SEG_CS);\n+\n+    return x86_is_long_mode(cpu) && ((desc.ar >> 13) & 1);\n+}\n+\n+bool x86_is_paging_mode(struct CPUState 
*cpu)\n+{\n+    uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);\n+    return cr0 & CR0_PG;\n+}\n+\n+bool x86_is_pae_enabled(struct CPUState *cpu)\n+{\n+    uint64_t cr4 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);\n+    return cr4 & CR4_PAE;\n+}\n+\n+addr_t linear_addr(struct CPUState *cpu, addr_t addr, x86_reg_segment seg)\n+{\n+    return vmx_read_segment_base(cpu, seg) + addr;\n+}\n+\n+addr_t linear_addr_size(struct CPUState *cpu, addr_t addr, int size, x86_reg_segment seg)\n+{\n+    switch (size) {\n+        case 2:\n+            addr = (uint16_t)addr;\n+            break;\n+        case 4:\n+            addr = (uint32_t)addr;\n+            break;\n+        default:\n+            break;\n+    }\n+    return linear_addr(cpu, addr, seg);\n+}\n+\n+addr_t linear_rip(struct CPUState *cpu, addr_t rip)\n+{\n+    return linear_addr(cpu, rip, REG_SEG_CS);\n+}\n
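The linear_addr() helpers close out this file: a guest-linear address is formed by adding the segment base read from the VMCS, and linear_addr_size() first truncates the effective address to the given operand width. A worked example with hypothetical values (SS base 0x20000 in the VMCS):

    /* a 16-bit stack access wraps at 64 KiB before the base is added */
    addr_t va = linear_addr_size(cpu, 0x10002, 2, REG_SEG_SS);
    /* 0x10002 is truncated to 0x0002 first, so va == 0x20002, not 0x30002 */

diff --git a/target/i386/hvf-utils/x86.h b/target/i386/hvf-utils/x86.h\nnew file mode 100644\nindex 0000000000..5dffdd6568\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86.h\n@@ -0,0 +1,470 @@\n+/*\n+ * Copyright (C) 2016 Veertu Inc,\n+ * Copyright (C) 2017 Veertu Inc,\n+ *\n+ * This program is free software; you can redistribute it and/or\n+ * modify it under the terms of the GNU General Public License as\n+ * published by the Free Software Foundation; either version 2 or\n+ * (at your option) version 3 of the License.\n+ *\n+ * This program is distributed in the hope that it will be useful,\n+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\n+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n+ * GNU General Public License for more details.\n+ *\n+ * You should have received a copy of the GNU General Public License along\n+ * with this program; if not, see <http://www.gnu.org/licenses/>.\n+ */\n+\n+#pragma once\n+\n+#include <sys/types.h>\n+#include <sys/ioctl.h>\n+#include <sys/mman.h>\n+#include <stdarg.h>\n+#include \"qemu-common.h\"\n+#include \"x86_flags.h\"\n+\n+// exceptions\n+typedef enum x86_exception {\n+    EXCEPTION_DE,           // divide error\n+    EXCEPTION_DB,           // debug fault\n+    EXCEPTION_NMI,          // non-maskable interrupt\n+    EXCEPTION_BP,           // breakpoint\ttrap\n+    EXCEPTION_OF,           // overflow\ttrap\n+    EXCEPTION_BR,           // boundary range exceeded\tfault\n+    EXCEPTION_UD,           // undefined opcode\n+    EXCEPTION_NM,           // device not available\n+    EXCEPTION_DF,           // double fault\n+    EXCEPTION_RSVD,         // not defined\n+    EXCEPTION_TS,           // invalid TSS\tfault\n+    EXCEPTION_NP,           // not present\tfault\n+    EXCEPTION_GP,           // general protection\tfault\n+    EXCEPTION_PF,           // page fault\n+    EXCEPTION_RSVD2,        // not defined\n+} x86_exception;\n+\n+// general purpose regs\n+typedef enum x86_reg_name {\n+    REG_RAX = 0,\n+    REG_RCX = 1,\n+    REG_RDX = 2,\n+    REG_RBX = 3,\n+    REG_RSP = 4,\n+    REG_RBP = 5,\n+    REG_RSI = 6,\n+    REG_RDI = 7,\n+    REG_R8 = 8,\n+    REG_R9 = 9,\n+    REG_R10 = 10,\n+    REG_R11 = 11,\n+    REG_R12 = 12,\n+    REG_R13 = 13,\n+    REG_R14 = 14,\n+    REG_R15 = 15,\n+} x86_reg_name;\n+\n+// segment regs\n+typedef enum x86_reg_segment {\n+    REG_SEG_ES = 0,\n+    REG_SEG_CS = 1,\n+    REG_SEG_SS = 2,\n+    REG_SEG_DS = 3,\n+    REG_SEG_FS = 4,\n+    REG_SEG_GS = 5,\n+    REG_SEG_LDTR = 6,\n+    REG_SEG_TR = 7,\n+} x86_reg_segment;\n+\n+typedef struct x86_register\n+{\n+    union {\n+        struct {\n+            uint64_t rrx;               // 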
full 64 bit\n+        };\n+        struct {\n+            uint32_t erx;               // low 32 bit part\n+            uint32_t hi32_unused1;\n+        };\n+        struct {\n+            uint16_t rx;                // low 16 bit part\n+            uint16_t hi16_unused1;\n+            uint32_t hi32_unused2;\n+        };\n+        struct {\n+            uint8_t lx;                 // low 8 bit part\n+            uint8_t hx;                 // high 8 bit\n+            uint16_t hi16_unused2;\n+            uint32_t hi32_unused3;\n+        };\n+    };\n+} __attribute__ ((__packed__)) x86_register;\n+\n+typedef enum x86_rflags {\n+    RFLAGS_CF       = (1L << 0),\n+    RFLAGS_PF       = (1L << 2),\n+    RFLAGS_AF       = (1L << 4),\n+    RFLAGS_ZF       = (1L << 6),\n+    RFLAGS_SF       = (1L << 7),\n+    RFLAGS_TF       = (1L << 8),\n+    RFLAGS_IF       = (1L << 9),\n+    RFLAGS_DF       = (1L << 10),\n+    RFLAGS_OF       = (1L << 11),\n+    RFLAGS_IOPL     = (3L << 12),\n+    RFLAGS_NT       = (1L << 14),\n+    RFLAGS_RF       = (1L << 16),\n+    RFLAGS_VM       = (1L << 17),\n+    RFLAGS_AC       = (1L << 18),\n+    RFLAGS_VIF      = (1L << 19),\n+    RFLAGS_VIP      = (1L << 20),\n+    RFLAGS_ID       = (1L << 21),\n+} x86_rflags;\n+\n+// rflags register\n+typedef struct x86_reg_flags {\n+    union {\n+        struct {\n+            uint64_t rflags;\n+        };\n+        struct {\n+            uint32_t eflags;\n+            uint32_t hi32_unused1;\n+        };\n+        struct {\n+            uint32_t cf:1;\n+            uint32_t unused1:1;\n+            uint32_t pf:1;\n+            uint32_t unused2:1;\n+            uint32_t af:1;\n+            uint32_t unused3:1;\n+            uint32_t zf:1;\n+            uint32_t sf:1;\n+            uint32_t tf:1;\n+            uint32_t ief:1;\n+            uint32_t df:1;\n+            uint32_t of:1;\n+            uint32_t iopl:2;\n+            uint32_t nt:1;\n+            uint32_t unused4:1;\n+            uint32_t rf:1;\n+            uint32_t vm:1;\n+            uint32_t ac:1;\n+            uint32_t vif:1;\n+            uint32_t vip:1;\n+            uint32_t id:1;\n+            uint32_t unused5:10;\n+            uint32_t hi32_unused2;\n+        };\n+    };\n+} __attribute__ ((__packed__)) x86_reg_flags;\n+\n+typedef enum x86_reg_efer {\n+    EFER_SCE =          (1L << 0),\n+    EFER_LME =          (1L << 8),\n+    EFER_LMA =          (1L << 10),\n+    EFER_NXE =          (1L << 11),\n+    EFER_SVME =         (1L << 12),\n+    EFER_FXSR =         (1L << 14),\n+} x86_reg_efer;\n+\n+typedef struct x86_efer {\n+    uint64_t efer;\n+} __attribute__ ((__packed__)) x86_efer;\n+\n+typedef enum x86_reg_cr0 {\n+    CR0_PE =            (1L << 0),\n+    CR0_MP =            (1L << 1),\n+    CR0_EM =            (1L << 2),\n+    CR0_TS =            (1L << 3),\n+    CR0_ET =            (1L << 4),\n+    CR0_NE =            (1L << 5),\n+    CR0_WP =            (1L << 16),\n+    CR0_AM =            (1L << 18),\n+    CR0_NW =            (1L << 29),\n+    CR0_CD =            (1L << 30),\n+    CR0_PG =            (1L << 31),\n+} x86_reg_cr0;\n+\n+typedef enum x86_reg_cr4 {\n+    CR4_VME =            (1L << 0),\n+    CR4_PVI =            (1L << 1),\n+    CR4_TSD =            (1L << 2),\n+    CR4_DE  =            (1L << 3),\n+    CR4_PSE =            (1L << 4),\n+    CR4_PAE =            (1L << 5),\n+    CR4_MSE =            (1L << 6),\n+    CR4_PGE =            (1L << 7),\n+    CR4_PCE =            (1L << 8),\n+    CR4_OSFXSR =         (1L << 9),\n+    CR4_OSXMMEXCPT =     
(1L << 10),\n+    CR4_VMXE =           (1L << 13),\n+    CR4_SMXE =           (1L << 14),\n+    CR4_FSGSBASE =       (1L << 16),\n+    CR4_PCIDE =          (1L << 17),\n+    CR4_OSXSAVE =        (1L << 18),\n+    CR4_SMEP =           (1L << 20),\n+} x86_reg_cr4;\n+\n+// 16 bit Task State Segment; every field of the 16-bit TSS is 16 bits wide\n+typedef struct x86_tss_segment16 {\n+    uint16_t link;\n+    uint16_t sp0;\n+    uint16_t ss0;\n+    uint16_t sp1;\n+    uint16_t ss1;\n+    uint16_t sp2;\n+    uint16_t ss2;\n+    uint16_t ip;\n+    uint16_t flags;\n+    uint16_t ax;\n+    uint16_t cx;\n+    uint16_t dx;\n+    uint16_t bx;\n+    uint16_t sp;\n+    uint16_t bp;\n+    uint16_t si;\n+    uint16_t di;\n+    uint16_t es;\n+    uint16_t cs;\n+    uint16_t ss;\n+    uint16_t ds;\n+    uint16_t ldtr;\n+} __attribute__((packed)) x86_tss_segment16;\n+\n+// 32 bit Task State Segment\n+typedef struct x86_tss_segment32\n+{\n+    uint32_t prev_tss;\n+    uint32_t esp0;\n+    uint32_t ss0;\n+    uint32_t esp1;\n+    uint32_t ss1;\n+    uint32_t esp2;\n+    uint32_t ss2;\n+    uint32_t cr3;\n+    uint32_t eip;\n+    uint32_t eflags;\n+    uint32_t eax;\n+    uint32_t ecx;\n+    uint32_t edx;\n+    uint32_t ebx;\n+    uint32_t esp;\n+    uint32_t ebp;\n+    uint32_t esi;\n+    uint32_t edi;\n+    uint32_t es;\n+    uint32_t cs;\n+    uint32_t ss;\n+    uint32_t ds;\n+    uint32_t fs;\n+    uint32_t gs;\n+    uint32_t ldt;\n+    uint16_t trap;\n+    uint16_t iomap_base;\n+} __attribute__ ((__packed__)) x86_tss_segment32;\n+\n+// 64 bit Task State Segment\n+typedef struct x86_tss_segment64\n+{\n+    uint32_t unused;\n+    uint64_t rsp0;\n+    uint64_t rsp1;\n+    uint64_t rsp2;\n+    uint64_t unused1;\n+    uint64_t ist1;\n+    uint64_t ist2;\n+    uint64_t ist3;\n+    uint64_t ist4;\n+    uint64_t ist5;\n+    uint64_t ist6;\n+    uint64_t ist7;\n+    uint64_t unused2;\n+    uint16_t unused3;\n+    uint16_t iomap_base;\n+} __attribute__ ((__packed__)) x86_tss_segment64;\n+\n+// segment descriptors\n+typedef struct x86_segment_descriptor {\n+    uint64_t    limit0:16;\n+    uint64_t    base0:16;\n+    uint64_t    base1:8;\n+    uint64_t    type:4;\n+    uint64_t    s:1;\n+    uint64_t    dpl:2;\n+    uint64_t    p:1;\n+    uint64_t    limit1:4;\n+    uint64_t    avl:1;\n+    uint64_t    l:1;\n+    uint64_t    db:1;\n+    uint64_t    g:1;\n+    uint64_t    base2:8;\n+} __attribute__ ((__packed__)) x86_segment_descriptor;\n+\n+static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)\n+{\n+    return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);\n+}\n+\n+static inline void x86_set_segment_base(x86_segment_descriptor *desc, uint32_t base)\n+{\n+    desc->base2 = base >> 24;\n+    desc->base1 = (base >> 16) & 0xff;\n+    desc->base0 = base & 0xffff;\n+}\n+\n+static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)\n+{\n+    uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);\n+    if (desc->g)\n+        return (limit << 12) | 0xfff;\n+    return limit;\n+}\n+\n+static inline void x86_set_segment_limit(x86_segment_descriptor *desc, uint32_t limit)\n+{\n+    desc->limit0 = limit & 0xffff;\n+    desc->limit1 = limit >> 16;\n+}\n+\n+typedef struct x86_call_gate {\n+    uint64_t offset0:16;\n+    uint64_t selector:16;\n+    uint64_t param_count:4;\n+    uint64_t reserved:3;\n+    uint64_t type:4;\n+    uint64_t dpl:1;\n+    uint64_t p:1;\n+    uint64_t offset1:16;\n+} __attribute__ ((__packed__)) x86_call_gate;\n+\n+static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)\n+{\n+    return (uint32_t)((gate->offset1 << 16) | gate->offset0);\n+}\n+\n+#define LDT_SEL     0\n+#define GDT_SEL     1\n+\n+typedef struct x68_segment_selector {\n+    union {\n+        uint16_t sel;\n+        struct {\n+            uint16_t rpl:2;\n+            uint16_t ti:1;\n+            uint16_t index:13;\n+        };\n+    };\n+} __attribute__ ((__packed__)) x68_segment_selector;\n+\n+// x86 vcpu state tracked by the hvf backend\n+struct hvf_x86_state {\n+    int hlt;\n+    uint64_t init_tsc;\n+\n+    int interruptable;\n+    uint64_t exp_rip;\n+    uint64_t fetch_rip;\n+    uint64_t rip;\n+    struct x86_register regs[16];\n+    struct x86_reg_flags   rflags;\n+    struct lazy_flags   lflags;\n+    struct x86_efer efer;\n+    uint8_t mmio_buf[4096];\n+    uint8_t* apic_page;\n+};\n+\n+/*\n+ * hvf xsave area\n+ */\n+struct hvf_xsave_buf {\n+    uint32_t data[1024];\n+};\n+\n+// useful register access macros\n+#define RIP(cpu)    (cpu->hvf_x86->rip)\n+#define EIP(cpu)    ((uint32_t)cpu->hvf_x86->rip)\n+#define RFLAGS(cpu) (cpu->hvf_x86->rflags.rflags)\n+#define EFLAGS(cpu) (cpu->hvf_x86->rflags.eflags)\n+\n+#define RRX(cpu, reg) (cpu->hvf_x86->regs[reg].rrx)\n+#define RAX(cpu)        RRX(cpu, REG_RAX)\n+#define RCX(cpu)        RRX(cpu, REG_RCX)\n+#define RDX(cpu)        RRX(cpu, REG_RDX)\n+#define RBX(cpu)        RRX(cpu, REG_RBX)\n+#define RSP(cpu)        RRX(cpu, REG_RSP)\n+#define RBP(cpu)        RRX(cpu, REG_RBP)\n+#define RSI(cpu)        RRX(cpu, REG_RSI)\n+#define RDI(cpu)        RRX(cpu, REG_RDI)\n+#define R8(cpu)         RRX(cpu, REG_R8)\n+#define R9(cpu)         RRX(cpu, REG_R9)\n+#define R10(cpu)        RRX(cpu, REG_R10)\n+#define R11(cpu)        RRX(cpu, REG_R11)\n+#define R12(cpu)        RRX(cpu, REG_R12)\n+#define R13(cpu)        RRX(cpu, REG_R13)\n+#define R14(cpu)        RRX(cpu, REG_R14)\n+#define R15(cpu)        RRX(cpu, REG_R15)\n+\n+#define ERX(cpu, reg)   (cpu->hvf_x86->regs[reg].erx)\n+#define EAX(cpu)        ERX(cpu, REG_RAX)\n+#define ECX(cpu)        ERX(cpu, REG_RCX)\n+#define EDX(cpu)        ERX(cpu, REG_RDX)\n+#define EBX(cpu)        ERX(cpu, REG_RBX)\n+#define ESP(cpu)        ERX(cpu, REG_RSP)\n+#define EBP(cpu)        ERX(cpu, REG_RBP)\n+#define ESI(cpu)        ERX(cpu, REG_RSI)\n+#define EDI(cpu)        ERX(cpu, REG_RDI)\n+\n+#define RX(cpu, reg)   (cpu->hvf_x86->regs[reg].rx)\n+#define AX(cpu)        RX(cpu, REG_RAX)\n+#define CX(cpu)        RX(cpu, REG_RCX)\n+#define DX(cpu)        RX(cpu, REG_RDX)\n+#define BP(cpu)        RX(cpu, REG_RBP)\n+#define SP(cpu)        RX(cpu, REG_RSP)\n+#define BX(cpu)        RX(cpu, REG_RBX)\n+#define SI(cpu)        RX(cpu, REG_RSI)\n+#define DI(cpu)        RX(cpu, REG_RDI)\n+\n+#define RL(cpu, reg)   (cpu->hvf_x86->regs[reg].lx)\n+#define AL(cpu)        RL(cpu, REG_RAX)\n+#define CL(cpu)        RL(cpu, REG_RCX)\n+#define DL(cpu)        RL(cpu, REG_RDX)\n+#define BL(cpu)        RL(cpu, REG_RBX)\n+\n+#define RH(cpu, reg)   (cpu->hvf_x86->regs[reg].hx)\n+#define AH(cpu)        RH(cpu, REG_RAX)\n+#define CH(cpu)        RH(cpu, REG_RCX)\n+#define DH(cpu)        RH(cpu, REG_RDX)\n+#define BH(cpu)        RH(cpu, REG_RBX)\n+\n+// deal with GDT/LDT descriptors in memory\n+bool x86_read_segment_descriptor(struct CPUState *cpu, struct x86_segment_descriptor *desc, x68_segment_selector sel);\n+bool x86_write_segment_descriptor(struct CPUState *cpu, struct x86_segment_descriptor *desc, x68_segment_selector sel);\n+\n+bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc, int gate);\n+\n+// helpers\n+bool 
x86_is_protected(struct CPUState *cpu);\n+bool x86_is_real(struct CPUState *cpu);\n+bool x86_is_v8086(struct CPUState *cpu);\n+bool x86_is_long_mode(struct CPUState *cpu);\n+bool x86_is_long64_mode(struct CPUState *cpu);\n+bool x86_is_paging_mode(struct CPUState *cpu);\n+bool x86_is_pae_enabled(struct CPUState *cpu);\n+\n+addr_t linear_addr(struct CPUState *cpu, addr_t addr, x86_reg_segment seg);\n+addr_t linear_addr_size(struct CPUState *cpu, addr_t addr, int size, x86_reg_segment seg);\n+addr_t linear_rip(struct CPUState *cpu, addr_t rip);\n+\n+static inline uint64_t rdtscp(void)\n+{\n+    uint64_t tsc;\n+    __asm__ __volatile__(\"rdtscp; \"         // serializing read of tsc\n+                         \"shl $32,%%rdx; \"  // shift higher 32 bits stored in rdx up\n+                         \"or %%rdx,%%rax\"   // and or onto rax\n+                         : \"=a\"(tsc)        // output to tsc variable\n+                         :\n+                         : \"%rcx\", \"%rdx\"); // rcx and rdx are clobbered\n+\n+    return tsc;\n+}\n+\n
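One note on the selector type above: with the 2-bit RPL / 1-bit TI / 13-bit index layout, a raw selector value decomposes as in this small sketch (the value is hypothetical, a typical 64-bit user data selector):

    x68_segment_selector sel;
    sel.sel = 0x002b;
    /* sel.rpl == 3, sel.ti == 0 (GDT), sel.index == 5,
     * so the descriptor lives at GDT base + 5 * 8,
     * which is exactly how x86_read_segment_descriptor() locates it */

diff --git a/target/i386/hvf-utils/x86_cpuid.c b/target/i386/hvf-utils/x86_cpuid.c\nnew file mode 100644\nindex 0000000000..e496cf001c\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_cpuid.c\n@@ -0,0 +1,270 @@\n+/*\n+ *  i386 CPUID helper functions\n+ *\n+ *  Copyright (c) 2003 Fabrice Bellard\n+ *  Copyright (c) 2017 Google Inc.\n+ *\n+ * This library is free software; you can redistribute it and/or\n+ * modify it under the terms of the GNU Lesser General Public\n+ * License as published by the Free Software Foundation; either\n+ * version 2 of the License, or (at your option) any later version.\n+ *\n+ * This library is distributed in the hope that it will be useful,\n+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\n+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  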
See the GNU\n+ * Lesser General Public License for more details.\n+ *\n+ * You should have received a copy of the GNU Lesser General Public\n+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.\n+ *\n+ * cpuid\n+ */\n+\n+#include \"qemu/osdep.h\"\n+#include \"x86_cpuid.h\"\n+#include \"x86.h\"\n+#include \"vmx.h\"\n+\n+#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \\\n+    CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \\\n+    CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \\\n+    CPUID_PAE | CPUID_SEP | CPUID_APIC)\n+\n+struct x86_cpuid builtin_cpus[] = {\n+    {\n+        .name = \"vmx32\",\n+        .vendor1  = CPUID_VENDOR_INTEL_1,\n+        .vendor2  = CPUID_VENDOR_INTEL_2,\n+        .vendor3  = CPUID_VENDOR_INTEL_3,\n+        .level = 4,\n+        .family = 6,\n+        .model = 3,\n+        .stepping = 3,\n+        .features = PPRO_FEATURES | CPUID_MTRR | CPUID_CLFLUSH |\n+                    CPUID_PSE36,\n+        .ext_features = /*CPUID_EXT_SSE3 |*/ CPUID_EXT_POPCNT,\n+        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,\n+        .ext3_features = 0, /* CPUID_EXT3_LAHF_LM */\n+        .xlevel = 0x80000004,\n+        .model_id = \"vmx32\",\n+    },\n+    {\n+        .name = \"core2duo\",\n+        .vendor1  = CPUID_VENDOR_INTEL_1,\n+        .vendor2  = CPUID_VENDOR_INTEL_2,\n+        .vendor3  = CPUID_VENDOR_INTEL_3,\n+        .level = 10,\n+        .family = 6,\n+        .model = 15,\n+        .stepping = 11,\n+        .features = PPRO_FEATURES |\n+        CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |\n+        CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |\n+        CPUID_HT | CPUID_TM | CPUID_PBE,\n+        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_SSSE3 |\n+        CPUID_EXT_DTES64 | CPUID_EXT_DSCPL |\n+        CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM | CPUID_EXT_HYPERVISOR,\n+        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,\n+        .ext3_features = CPUID_EXT3_LAHF_LM,\n+        .xlevel = 0x80000008,\n+        .model_id = \"Intel(R) Core(TM)2 Duo GETCPU     T7700  @ 2.40GHz\",\n+    },\n+    {\n+        .name = \"vmX\",\n+        .vendor1  = CPUID_VENDOR_INTEL_1,\n+        .vendor2  = CPUID_VENDOR_INTEL_2,\n+        .vendor3  = CPUID_VENDOR_INTEL_3,\n+        .level = 0xd,\n+        .family = 6,\n+        .model = 15,\n+        .stepping = 11,\n+        .features = PPRO_FEATURES |\n+        CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |\n+        CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |\n+        CPUID_HT | CPUID_TM | CPUID_PBE,\n+        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_SSSE3 |\n+        CPUID_EXT_DTES64 | CPUID_EXT_DSCPL |\n+        CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM | CPUID_EXT_HYPERVISOR,\n+        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,\n+        .ext3_features = CPUID_EXT3_LAHF_LM,\n+        .xlevel = 0x80000008,\n+        .model_id = \"Common vmX processor\",\n+    },\n+};\n+\n+static struct x86_cpuid *_cpuid = NULL;\n+\n+void init_cpuid(struct CPUState *cpu)\n+{\n+    _cpuid = &builtin_cpus[2]; /* \"vmX\" */\n+}\n+\n+void get_cpuid_func(struct CPUState *cpu, int func, int cnt, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)\n+{\n+    uint32_t h_rax, h_rbx, h_rcx, h_rdx;\n+    host_cpuid(func, cnt, &h_rax, &h_rbx, &h_rcx, &h_rdx);\n+    uint32_t apic_id = X86_CPU(cpu)->apic_id;\n+\n+    *eax = *ebx = *ecx = *edx = 0;\n+    switch (func) {\n+      
  case 0:\n+            *eax = _cpuid->level;\n+            *ebx = _cpuid->vendor1;\n+            *edx = _cpuid->vendor2;\n+            *ecx = _cpuid->vendor3;\n+            break;\n+        case 1:\n+            *eax = h_rax;//_cpuid->stepping | (_cpuid->model << 3) | (_cpuid->family << 6);\n+            *ebx = (apic_id << 24) | (h_rbx & 0x00ffffff);\n+            *ecx = h_rcx;\n+            *edx = h_rdx;\n+\n+            if (cpu->nr_cores * cpu->nr_threads > 1) {\n+                *ebx |= (cpu->nr_cores * cpu->nr_threads) << 16;\n+                *edx |= 1 << 28;    /* Enable Hyper-Threading */\n+            }\n+\n+            *ecx = *ecx & ~(CPUID_EXT_OSXSAVE | CPUID_EXT_MONITOR | CPUID_EXT_X2APIC |\n+                        CPUID_EXT_VMX | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_TM2 | CPUID_EXT_PCID |\n+                        CPUID_EXT_EST | CPUID_EXT_SSE42 | CPUID_EXT_SSE41);\n+            *ecx |= CPUID_EXT_HYPERVISOR;\n+            break;\n+        case 2:\n+            /* cache info: needed for Pentium Pro compatibility */\n+            *eax = h_rax;\n+            *ebx = h_rbx;\n+            *ecx = h_rcx;\n+            *edx = h_rdx;\n+            break;\n+        case 4:\n+            /* cache info: needed for Core compatibility */\n+            *eax = h_rax;\n+            *ebx = h_rbx;\n+            *ecx = h_rcx;\n+            *edx = h_rdx;\n+            break;\n+        case 5:\n+            /* mwait info: needed for Core compatibility */\n+            *eax = h_rax;\n+            *ebx = h_rbx;\n+            *ecx = h_rcx;\n+            *edx = h_rdx;\n+            break;\n+        case 6:\n+            /* Thermal and Power Leaf */\n+            *eax = 0;\n+            *ebx = 0;\n+            *ecx = 0;\n+            *edx = 0;\n+            break;\n+        case 7:\n+            *eax = h_rax;\n+            *ebx = h_rbx & ~(CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512PF | CPUID_7_0_EBX_AVX512ER | CPUID_7_0_EBX_AVX512CD |\n+                             CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_INVPCID);\n+            *ecx = h_rcx & ~(CPUID_7_0_ECX_AVX512BMI);\n+            *edx = h_rdx;\n+            break;\n+        case 9:\n+            /* Direct Cache Access Information Leaf */\n+            *eax = h_rax;\n+            *ebx = h_rbx;\n+            *ecx = h_rcx;\n+            *edx = h_rdx;\n+            break;\n+        case 0xA:\n+            /* Architectural Performance Monitoring Leaf */\n+            *eax = 0;\n+            *ebx = 0;\n+            *ecx = 0;\n+            *edx = 0;\n+            break;\n+        case 0xB:\n+            /* CPU Topology Leaf */\n+            *eax = 0;\n+            *ebx = 0;   /* Means that we don't support this leaf */\n+            *ecx = 0;\n+            *edx = 0;\n+            break;\n+        case 0xD:\n+            *eax = h_rax;\n+            if (!cnt)\n+                *eax &= (XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK);\n+            if (1 == cnt)\n+                *eax &= (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC);\n+            *ebx = h_rbx;\n+            *ecx = h_rcx;\n+            *edx = h_rdx;\n+            break;\n+        case 0x80000000:\n+            *eax = _cpuid->xlevel;\n+            *ebx = _cpuid->vendor1;\n+            *edx = _cpuid->vendor2;\n+            *ecx = _cpuid->vendor3;\n+            break;\n+        case 0x80000001:\n+            *eax = h_rax;//_cpuid->stepping | (_cpuid->model << 3) | (_cpuid->family << 6);\n+            *ebx = 0;\n+            *ecx = 
_cpuid->ext3_features & h_rcx;\n+            *edx = _cpuid->ext2_features & h_rdx;\n+            break;\n+        case 0x80000002:\n+        case 0x80000003:\n+        case 0x80000004:\n+            *eax = h_rax;\n+            *ebx = h_rbx;\n+            *ecx = h_rcx;\n+            *edx = h_rdx;\n+            break;\n+        case 0x80000005:\n+            /* cache info (L1 cache) */\n+            *eax = h_rax;\n+            *ebx = h_rbx;\n+            *ecx = h_rcx;\n+            *edx = h_rdx;\n+            break;\n+        case 0x80000006:\n+            /* cache info (L2 cache) */\n+            *eax = h_rax;\n+            *ebx = h_rbx;\n+            *ecx = h_rcx;\n+            *edx = h_rdx;\n+            break;\n+        case 0x80000007:\n+            *eax = 0;\n+            *ebx = 0;\n+            *ecx = 0;\n+            *edx = 0;   /* Note: we disable invariant TSC (bit 8) on purpose */\n+            break;\n+        case 0x80000008:\n+            /* virtual & phys address size in low 2 bytes. */\n+            *eax = h_rax;\n+            *ebx = 0;\n+            *ecx = 0;\n+            *edx = 0;\n+            break;\n+        case 0x8000000A:\n+            *eax = 0;\n+            *ebx = 0;\n+            *ecx = 0;\n+            *edx = 0;\n+            break;\n+        case 0x80000019:\n+            *eax = h_rax;\n+            *ebx = h_rbx;\n+            *ecx = 0;\n+            *edx = 0;\n+            break;\n+        case 0xC0000000:\n+            *eax = _cpuid->xlevel2;\n+            *ebx = 0;\n+            *ecx = 0;\n+            *edx = 0;\n+            break;\n+        default:\n+            *eax = 0;\n+            *ebx = 0;\n+            *ecx = 0;\n+            *edx = 0;\n+            break;\n+    }\n+}\n
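For orientation, the CPUID exit handler later in this series funnels into this function roughly as follows (a sketch, not the exact call site; the EAX()/ECX() register accessors come from x86.h):

    uint32_t eax, ebx, ecx, edx;
    /* the guest's RAX selects the leaf, RCX the subleaf */
    get_cpuid_func(cpu, EAX(cpu), ECX(cpu), &eax, &ebx, &ecx, &edx);
    EAX(cpu) = eax;
    EBX(cpu) = ebx;
    ECX(cpu) = ecx;
    EDX(cpu) = edx;

diff --git a/target/i386/hvf-utils/x86_cpuid.h b/target/i386/hvf-utils/x86_cpuid.h\nnew file mode 100644\nindex 0000000000..02f2f115b0\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_cpuid.h\n@@ -0,0 +1,51 @@\n+/*\n+ * Copyright (C) 2016 Veertu Inc,\n+ *\n+ * This program is free software; you can redistribute it and/or\n+ * modify it under the terms of the GNU General Public License as\n+ * published by the Free Software Foundation; either version 2 or\n+ * (at your option) version 3 of the License.\n+ *\n+ * This program is distributed in the hope that it will be useful,\n+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\n+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  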
See the\n+ * GNU General Public License for more details.\n+ *\n+ * You should have received a copy of the GNU General Public License along\n+ * with this program; if not, see <http://www.gnu.org/licenses/>.\n+ */\n+#ifndef __CPUID_H__\n+#define __CPUID_H__\n+\n+#include <sys/types.h>\n+#include <sys/ioctl.h>\n+#include <sys/mman.h>\n+#include <stdarg.h>\n+#include \"qemu-common.h\"\n+#include \"x86_flags.h\"\n+\n+struct x86_cpuid {\n+    const char *name;\n+    uint32_t level;\n+    uint32_t vendor1, vendor2, vendor3;\n+    int family;\n+    int model;\n+    int stepping;\n+    int tsc_khz;\n+    uint32_t features, ext_features, ext2_features, ext3_features;\n+    uint32_t kvm_features, svm_features;\n+    uint32_t xlevel;\n+    char model_id[48];\n+    int vendor_override;\n+    uint32_t flags;\n+    uint32_t xlevel2;\n+    uint32_t cpuid_7_0_ebx_features;\n+};\n+\n+struct CPUState;\n+\n+void init_cpuid(struct CPUState *cpu);\n+void get_cpuid_func(struct CPUState *cpu, int func, int cnt, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);\n+\n+#endif /* __CPUID_H__ */\n+\n
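The instruction decoder that follows leans heavily on a small sign-extension helper, sign(), which widens a value of the given byte size to a signed 64-bit value. Two quick illustrations (values chosen purely for illustration):

    sign(0x80, 1);    /* -> 0xffffffffffffff80, i.e. -128 */
    sign(0x7f, 1);    /* -> 0x000000000000007f, i.e. +127 */

diff --git a/target/i386/hvf-utils/x86_decode.c b/target/i386/hvf-utils/x86_decode.c\nnew file mode 100644\nindex 0000000000..b4d8e22449\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_decode.c\n@@ -0,0 +1,1659 @@\n+/*\n+ * Copyright (C) 2016 Veertu Inc,\n+ * Copyright (C) 2017 Google Inc,\n+ *\n+ * This program is free software; you can redistribute it and/or\n+ * modify it under the terms of the GNU General Public License as\n+ * published by the Free Software Foundation; either version 2 or\n+ * (at your option) version 3 of the License.\n+ *\n+ * This program is distributed in the hope that it will be useful,\n+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\n+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  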
See the\n+ * GNU General Public License for more details.\n+ *\n+ * You should have received a copy of the GNU General Public License along\n+ * with this program; if not, see <http://www.gnu.org/licenses/>.\n+ */\n+\n+#include \"qemu/osdep.h\"\n+\n+#include \"x86_decode.h\"\n+#include \"string.h\"\n+#include \"vmx.h\"\n+#include \"x86_gen.h\"\n+#include \"x86_mmu.h\"\n+#include \"x86_descr.h\"\n+\n+#define OPCODE_ESCAPE   0xf\n+\n+static void decode_invalid(CPUState *cpu, struct x86_decode *decode)\n+{\n+    printf(\"%llx: failed to decode instruction \", cpu->hvf_x86->fetch_rip - decode->len);\n+    for (int i = 0; i < decode->opcode_len; i++)\n+        printf(\"%x \", decode->opcode[i]);\n+    printf(\"\\n\");\n+    VM_PANIC(\"decoder failed\\n\");\n+}\n+\n+uint64_t sign(uint64_t val, int size)\n+{\n+    switch (size) {\n+        case 1:\n+            val = (int8_t)val;\n+            break;\n+        case 2:\n+            val = (int16_t)val;\n+            break;\n+        case 4:\n+            val = (int32_t)val;\n+            break;\n+        case 8:\n+            val = (int64_t)val;\n+            break;\n+        default:\n+            VM_PANIC_EX(\"%s invalid size %d\\n\", __FUNCTION__, size);\n+            break;\n+    }\n+    return val;\n+}\n+\n+static inline uint64_t decode_bytes(CPUState *cpu, struct x86_decode *decode, int size)\n+{\n+    addr_t val = 0;\n+    \n+    switch (size) {\n+        case 1:\n+        case 2:\n+        case 4:\n+        case 8:\n+            break;\n+        default:\n+            VM_PANIC_EX(\"%s invalid size %d\\n\", __FUNCTION__, size);\n+            break;\n+    }\n+    addr_t va  = linear_rip(cpu, RIP(cpu)) + decode->len;\n+    vmx_read_mem(cpu, &val, va, size);\n+    decode->len += size;\n+    \n+    return val;\n+}\n+\n+static inline uint8_t decode_byte(CPUState *cpu, struct x86_decode *decode)\n+{\n+    return (uint8_t)decode_bytes(cpu, decode, 1);\n+}\n+\n+static inline uint16_t decode_word(CPUState *cpu, struct x86_decode *decode)\n+{\n+    return (uint16_t)decode_bytes(cpu, decode, 2);\n+}\n+\n+static inline uint32_t decode_dword(CPUState *cpu, struct x86_decode *decode)\n+{\n+    return (uint32_t)decode_bytes(cpu, decode, 4);\n+}\n+\n+static inline uint64_t decode_qword(CPUState *cpu, struct x86_decode *decode)\n+{\n+    return decode_bytes(cpu, decode, 8);\n+}\n+\n+static void decode_modrm_rm(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    op->type = X86_VAR_RM;\n+}\n+\n+static void decode_modrm_reg(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    op->type = X86_VAR_REG;\n+    op->reg = decode->modrm.reg;\n+    op->ptr = get_reg_ref(cpu, op->reg, decode->rex.r, decode->operand_size);\n+}\n+\n+static void decode_rax(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    op->type = X86_VAR_REG;\n+    op->reg = REG_RAX;\n+    op->ptr = get_reg_ref(cpu, op->reg, 0, decode->operand_size);\n+}\n+\n+static inline void decode_immediate(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *var, int size)\n+{\n+    var->type = X86_VAR_IMMEDIATE;\n+    var->size = size;\n+    switch (size) {\n+        case 1:\n+            var->val = decode_byte(cpu, decode);\n+            break;\n+        case 2:\n+            var->val = decode_word(cpu, decode);\n+            break;\n+        case 4:\n+            var->val = decode_dword(cpu, decode);\n+            break;\n+        case 8:\n+            var->val = decode_qword(cpu, decode);\n+            break;\n+        
default:\n+            VM_PANIC_EX(\"bad size %d\\n\", size);\n+    }\n+}\n+\n+static void decode_imm8(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    decode_immediate(cpu, decode, op, 1);\n+    op->type = X86_VAR_IMMEDIATE;\n+}\n+\n+static void decode_imm8_signed(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    decode_immediate(cpu, decode, op, 1);\n+    op->val = sign(op->val, 1);\n+    op->type = X86_VAR_IMMEDIATE;\n+}\n+\n+static void decode_imm16(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    decode_immediate(cpu, decode, op, 2);\n+    op->type = X86_VAR_IMMEDIATE;\n+}\n+\n+\n+static void decode_imm(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    if (8 == decode->operand_size) {\n+        decode_immediate(cpu, decode, op, 4);\n+        op->val = sign(op->val, decode->operand_size);\n+    } else {\n+        decode_immediate(cpu, decode, op, decode->operand_size);\n+    }\n+    op->type = X86_VAR_IMMEDIATE;\n+}\n+\n+static void decode_imm_signed(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    decode_immediate(cpu, decode, op, decode->operand_size);\n+    op->val = sign(op->val, decode->operand_size);\n+    op->type = X86_VAR_IMMEDIATE;\n+}\n+\n+static void decode_imm_1(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    op->type = X86_VAR_IMMEDIATE;\n+    op->val = 1;\n+}\n+\n+static void decode_imm_0(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    op->type = X86_VAR_IMMEDIATE;\n+    op->val = 0;\n+}\n+\n+\n+static void decode_pushseg(CPUState *cpu, struct x86_decode *decode)\n+{\n+    uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0];\n+    \n+    decode->op[0].type = X86_VAR_REG;\n+    switch (op) {\n+        case 0xe:\n+            decode->op[0].reg = REG_SEG_CS;\n+            break;\n+        case 0x16:\n+            decode->op[0].reg = REG_SEG_SS;\n+            break;\n+        case 0x1e:\n+            decode->op[0].reg = REG_SEG_DS;\n+            break;\n+        case 0x06:\n+            decode->op[0].reg = REG_SEG_ES;\n+            break;\n+        case 0xa0:\n+            decode->op[0].reg = REG_SEG_FS;\n+            break;\n+        case 0xa8:\n+            decode->op[0].reg = REG_SEG_GS;\n+            break;\n+    }\n+}\n+\n+static void decode_popseg(CPUState *cpu, struct x86_decode *decode)\n+{\n+    uint8_t op = (decode->opcode_len > 1) ? 
decode->opcode[1] : decode->opcode[0];\n+    \n+    decode->op[0].type = X86_VAR_REG;\n+    switch (op) {\n+        case 0xf:\n+            decode->op[0].reg = REG_SEG_CS;\n+            break;\n+        case 0x17:\n+            decode->op[0].reg = REG_SEG_SS;\n+            break;\n+        case 0x1f:\n+            decode->op[0].reg = REG_SEG_DS;\n+            break;\n+        case 0x07:\n+            decode->op[0].reg = REG_SEG_ES;\n+            break;\n+        case 0xa1:\n+            decode->op[0].reg = REG_SEG_FS;\n+            break;\n+        case 0xa9:\n+            decode->op[0].reg = REG_SEG_GS;\n+            break;\n+    }\n+}\n+\n+static void decode_incgroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->op[0].type = X86_VAR_REG;\n+    decode->op[0].reg = decode->opcode[0] - 0x40;\n+    decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+}\n+\n+static void decode_decgroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->op[0].type = X86_VAR_REG;\n+    decode->op[0].reg = decode->opcode[0] - 0x48;\n+    decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+}\n+\n+static void decode_incgroup2(CPUState *cpu, struct x86_decode *decode)\n+{\n+    if (!decode->modrm.reg)\n+        decode->cmd = X86_DECODE_CMD_INC;\n+    else if (1 == decode->modrm.reg)\n+        decode->cmd = X86_DECODE_CMD_DEC;\n+}\n+\n+static void decode_pushgroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->op[0].type = X86_VAR_REG;\n+    decode->op[0].reg = decode->opcode[0] - 0x50;\n+    decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+}\n+\n+static void decode_popgroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->op[0].type = X86_VAR_REG;\n+    decode->op[0].reg = decode->opcode[0] - 0x58;\n+    decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+}\n+\n+static void decode_jxx(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->displacement = decode_bytes(cpu, decode, decode->operand_size);\n+    decode->displacement_size = decode->operand_size;\n+}\n+\n+static void decode_farjmp(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->op[0].type = X86_VAR_IMMEDIATE;\n+    decode->op[0].val = decode_bytes(cpu, decode, decode->operand_size);\n+    decode->displacement = decode_word(cpu, decode);\n+}\n+\n+static void decode_addgroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    enum x86_decode_cmd group[] = {\n+        X86_DECODE_CMD_ADD,\n+        X86_DECODE_CMD_OR,\n+        X86_DECODE_CMD_ADC,\n+        X86_DECODE_CMD_SBB,\n+        X86_DECODE_CMD_AND,\n+        X86_DECODE_CMD_SUB,\n+        X86_DECODE_CMD_XOR,\n+        X86_DECODE_CMD_CMP\n+    };\n+    decode->cmd = group[decode->modrm.reg];\n+}\n+\n+static void decode_rotgroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    enum x86_decode_cmd group[] = {\n+        X86_DECODE_CMD_ROL,\n+        X86_DECODE_CMD_ROR,\n+        X86_DECODE_CMD_RCL,\n+        X86_DECODE_CMD_RCR,\n+        X86_DECODE_CMD_SHL,\n+        X86_DECODE_CMD_SHR,\n+        X86_DECODE_CMD_SHL,\n+        X86_DECODE_CMD_SAR\n+    };\n+    decode->cmd = group[decode->modrm.reg];\n+}\n+\n+static void decode_f7group(CPUState *cpu, struct x86_decode *decode)\n+{\n+    enum x86_decode_cmd group[] = {\n+        X86_DECODE_CMD_TST,\n+        X86_DECODE_CMD_TST,\n+        X86_DECODE_CMD_NOT,\n+        X86_DECODE_CMD_NEG,\n+        X86_DECODE_CMD_MUL,\n+  
      X86_DECODE_CMD_IMUL_1,\n+        X86_DECODE_CMD_DIV,\n+        X86_DECODE_CMD_IDIV\n+    };\n+    decode->cmd = group[decode->modrm.reg];\n+    decode_modrm_rm(cpu, decode, &decode->op[0]);\n+\n+    switch (decode->modrm.reg) {\n+        case 0:\n+        case 1:\n+            decode_imm(cpu, decode, &decode->op[1]);\n+            break;\n+        case 2:\n+            break;\n+        case 3:\n+            decode->op[1].type = X86_VAR_IMMEDIATE;\n+            decode->op[1].val = 0;\n+            break;\n+        default:\n+            break;\n+    }\n+}\n+\n+static void decode_xchgroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->op[0].type = X86_VAR_REG;\n+    decode->op[0].reg = decode->opcode[0] - 0x90;\n+    decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+}\n+\n+static void decode_movgroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->op[0].type = X86_VAR_REG;\n+    decode->op[0].reg = decode->opcode[0] - 0xb8;\n+    decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+    decode_immediate(cpu, decode, &decode->op[1], decode->operand_size);\n+}\n+\n+static void fetch_moffs(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    op->type = X86_VAR_OFFSET;\n+    op->ptr = decode_bytes(cpu, decode, decode->addressing_size);\n+}\n+\n+static void decode_movgroup8(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->op[0].type = X86_VAR_REG;\n+    decode->op[0].reg = decode->opcode[0] - 0xb0;\n+    decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+    decode_immediate(cpu, decode, &decode->op[1], decode->operand_size);\n+}\n+\n+static void decode_rcx(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    op->type = X86_VAR_REG;\n+    op->reg = REG_RCX;\n+    op->ptr = get_reg_ref(cpu, op->reg, decode->rex.b, decode->operand_size);\n+}\n+\n+struct decode_tbl {\n+    uint8_t opcode;\n+    enum x86_decode_cmd cmd;\n+    uint8_t operand_size;\n+    bool is_modrm;\n+    void (*decode_op1)(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op1);\n+    void (*decode_op2)(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op2);\n+    void (*decode_op3)(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op3);\n+    void (*decode_op4)(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op4);\n+    void (*decode_postfix)(CPUState *cpu, struct x86_decode *decode);\n+    addr_t flags_mask;\n+};\n+\n+struct decode_x87_tbl {\n+    uint8_t opcode;\n+    uint8_t modrm_reg;\n+    uint8_t modrm_mod;\n+    enum x86_decode_cmd cmd;\n+    uint8_t operand_size;\n+    bool rev;\n+    bool pop;\n+    void (*decode_op1)(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op1);\n+    void (*decode_op2)(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op2);\n+    void (*decode_postfix)(CPUState *cpu, struct x86_decode *decode);\n+    addr_t flags_mask;\n+};\n+\n+struct decode_tbl invl_inst = {0x0, 0, 0, false, NULL, NULL, NULL, NULL, decode_invalid};\n+\n+struct decode_tbl _decode_tbl1[255];\n+struct decode_tbl _decode_tbl2[255];\n+struct decode_x87_tbl _decode_tbl3[255];\n+\n+static void decode_x87_ins(CPUState *cpu, struct x86_decode *decode)\n+{\n+    struct decode_x87_tbl *decoder;\n+    \n+    decode->is_fpu = true;\n+    int mode = decode->modrm.mod == 3 ? 
int mode = decode->modrm.mod == 3 ? 1 : 0;\n+    int index = ((decode->opcode[0] & 0xf) << 4) | (mode << 3) | decode->modrm.reg;\n+    \n+    decoder = &_decode_tbl3[index];\n+    \n+    decode->cmd = decoder->cmd;\n+    if (decoder->operand_size)\n+        decode->operand_size = decoder->operand_size;\n+    decode->flags_mask = decoder->flags_mask;\n+    decode->fpop_stack = decoder->pop;\n+    decode->frev = decoder->rev;\n+    \n+    if (decoder->decode_op1)\n+        decoder->decode_op1(cpu, decode, &decode->op[0]);\n+    if (decoder->decode_op2)\n+        decoder->decode_op2(cpu, decode, &decode->op[1]);\n+    if (decoder->decode_postfix)\n+        decoder->decode_postfix(cpu, decode);\n+    \n+    VM_PANIC_ON_EX(!decode->cmd, \"x87 opcode %x %x (%x %x) not decoded\\n\", decode->opcode[0], decode->modrm.modrm, decoder->modrm_reg, decoder->modrm_mod);\n+}\n+\n+static void decode_ffgroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    enum x86_decode_cmd group[] = {\n+        X86_DECODE_CMD_INC,\n+        X86_DECODE_CMD_DEC,\n+        X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT,\n+        X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT,\n+        X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT,\n+        X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT,\n+        X86_DECODE_CMD_PUSH,\n+        X86_DECODE_CMD_INVL,\n+        X86_DECODE_CMD_INVL\n+    };\n+    decode->cmd = group[decode->modrm.reg];\n+    if (decode->modrm.reg > 2)\n+        decode->flags_mask = 0;\n+}\n+\n+static void decode_sldtgroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    enum x86_decode_cmd group[] = {\n+        X86_DECODE_CMD_SLDT,\n+        X86_DECODE_CMD_STR,\n+        X86_DECODE_CMD_LLDT,\n+        X86_DECODE_CMD_LTR,\n+        X86_DECODE_CMD_VERR,\n+        X86_DECODE_CMD_VERW,\n+        X86_DECODE_CMD_INVL,\n+        X86_DECODE_CMD_INVL\n+    };\n+    decode->cmd = group[decode->modrm.reg];\n+}\n+\n+static void decode_lidtgroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    enum x86_decode_cmd group[] = {\n+        X86_DECODE_CMD_SGDT,\n+        X86_DECODE_CMD_SIDT,\n+        X86_DECODE_CMD_LGDT,\n+        X86_DECODE_CMD_LIDT,\n+        X86_DECODE_CMD_SMSW,\n+        X86_DECODE_CMD_LMSW,\n+        X86_DECODE_CMD_LMSW,\n+        X86_DECODE_CMD_INVLPG\n+    };\n+    decode->cmd = group[decode->modrm.reg];\n+    if (0xf9 == decode->modrm.modrm) {\n+        decode->opcode[decode->len++] = decode->modrm.modrm;\n+        decode->cmd = X86_DECODE_CMD_RDTSCP;\n+    }\n+}\n+\n+static void decode_btgroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    enum x86_decode_cmd group[] = {\n+        X86_DECODE_CMD_INVL,\n+        X86_DECODE_CMD_INVL,\n+        X86_DECODE_CMD_INVL,\n+        X86_DECODE_CMD_INVL,\n+        X86_DECODE_CMD_BT,\n+        X86_DECODE_CMD_BTS,\n+        X86_DECODE_CMD_BTR,\n+        X86_DECODE_CMD_BTC\n+    };\n+    decode->cmd = group[decode->modrm.reg];\n+}\n+\n+static void decode_x87_general(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->is_fpu = true;\n+}\n+\n+static void decode_x87_modrm_floatp(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    op->type = X87_VAR_FLOATP;\n+}\n+\n+static void decode_x87_modrm_intp(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    op->type = X87_VAR_INTP;\n+}\n+\n+static void decode_x87_modrm_bytep(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    op->type = X87_VAR_BYTEP;\n+}\n+\n+static void decode_x87_modrm_st0(CPUState *cpu, 
struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    op->type = X87_VAR_REG;\n+    op->reg = 0;\n+}\n+\n+/* ST(i) operand: the x87 stack index is encoded in the low 3 bits of ModRM */\n+static void decode_decode_x87_modrm_st0(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    op->type = X87_VAR_REG;\n+    op->reg = decode->modrm.modrm & 7;\n+}\n+\n+static void decode_aegroup(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->is_fpu = true;\n+    switch (decode->modrm.reg) {\n+        case 0:\n+            decode->cmd = X86_DECODE_CMD_FXSAVE;\n+            decode_x87_modrm_bytep(cpu, decode, &decode->op[0]);\n+            break;\n+        case 1:\n+            decode_x87_modrm_bytep(cpu, decode, &decode->op[0]);\n+            decode->cmd = X86_DECODE_CMD_FXRSTOR;\n+            break;\n+        case 5:\n+            if (decode->modrm.modrm == 0xe8) {\n+                decode->cmd = X86_DECODE_CMD_LFENCE;\n+            } else {\n+                VM_PANIC(\"xrstor\");\n+            }\n+            break;\n+        case 6:\n+            VM_PANIC_ON(decode->modrm.modrm != 0xf0);\n+            decode->cmd = X86_DECODE_CMD_MFENCE;\n+            break;\n+        case 7:\n+            if (decode->modrm.modrm == 0xf8) {\n+                decode->cmd = X86_DECODE_CMD_SFENCE;\n+            } else {\n+                decode->cmd = X86_DECODE_CMD_CLFLUSH;\n+            }\n+            break;\n+        default:\n+            VM_PANIC_ON_EX(1, \"0xae: reg %d\\n\", decode->modrm.reg);\n+            break;\n+    }\n+}\n+\n+static void decode_bswap(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->op[0].type = X86_VAR_REG;\n+    decode->op[0].reg = decode->opcode[1] - 0xc8;\n+    decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+}\n+\n+static void decode_d9_4(CPUState *cpu, struct x86_decode *decode)\n+{\n+    switch (decode->modrm.modrm) {\n+        case 0xe0:\n+            // FCHS\n+            decode->cmd = X86_DECODE_CMD_FCHS;\n+            break;\n+        case 0xe1:\n+            decode->cmd = X86_DECODE_CMD_FABS;\n+            break;\n+        case 0xe4:\n+            VM_PANIC_ON_EX(1, \"FTST\");\n+            break;\n+        case 0xe5:\n+            // FXAM\n+            decode->cmd = X86_DECODE_CMD_FXAM;\n+            break;\n+        default:\n+            VM_PANIC_ON_EX(1, \"FLDENV\");\n+            break;\n+    }\n+}\n+\n+static void decode_db_4(CPUState *cpu, struct x86_decode *decode)\n+{\n+    switch (decode->modrm.modrm) {\n+        case 0xe0:\n+            VM_PANIC_ON_EX(1, \"unhandled FNENI: %x %x\\n\", decode->opcode[0], decode->modrm.modrm);\n+            break;\n+        case 0xe1:\n+            VM_PANIC_ON_EX(1, \"unhandled FNDISI: %x %x\\n\", decode->opcode[0], decode->modrm.modrm);\n+            break;\n+        case 0xe2:\n+            VM_PANIC_ON_EX(1, \"unhandled FCLEX: %x %x\\n\", decode->opcode[0], decode->modrm.modrm);\n+            break;\n+        case 0xe3:\n+            decode->cmd = X86_DECODE_CMD_FNINIT;\n+            break;\n+        case 0xe4:\n+            decode->cmd = X86_DECODE_CMD_FNSETPM;\n+            break;\n+        default:\n+            VM_PANIC_ON_EX(1, \"unhandled fpu opcode: %x %x\\n\", decode->opcode[0], decode->modrm.modrm);\n+            break;\n+    }\n+}\n+\n+#define RFLAGS_MASK_NONE    0\n+#define RFLAGS_MASK_OSZAPC  (RFLAGS_OF | RFLAGS_SF | RFLAGS_ZF | RFLAGS_AF | RFLAGS_PF | RFLAGS_CF)\n+#define RFLAGS_MASK_LAHF    (RFLAGS_SF | RFLAGS_ZF | RFLAGS_AF | RFLAGS_PF | RFLAGS_CF)\n+#define RFLAGS_MASK_CF      
(RFLAGS_CF)\n+#define RFLAGS_MASK_IF      (RFLAGS_IF)\n+#define RFLAGS_MASK_TF      (RFLAGS_TF)\n+#define RFLAGS_MASK_DF      (RFLAGS_DF)\n+#define RFLAGS_MASK_ZF      (RFLAGS_ZF)\n+\n+struct decode_tbl _1op_inst[] =\n+{\n+    {0x0, X86_DECODE_CMD_ADD, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x1, X86_DECODE_CMD_ADD, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x2, X86_DECODE_CMD_ADD, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x3, X86_DECODE_CMD_ADD, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x4, X86_DECODE_CMD_ADD, 1, false, decode_rax, decode_imm8, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x5, X86_DECODE_CMD_ADD, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x6, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n+    {0x7, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n+    {0x8, X86_DECODE_CMD_OR, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x9, X86_DECODE_CMD_OR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xa, X86_DECODE_CMD_OR, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xb, X86_DECODE_CMD_OR, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xc, X86_DECODE_CMD_OR, 1, false, decode_rax, decode_imm8, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xd, X86_DECODE_CMD_OR, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    \n+    {0xe, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n+    {0xf, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n+    \n+    {0x10, X86_DECODE_CMD_ADC, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x11, X86_DECODE_CMD_ADC, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x12, X86_DECODE_CMD_ADC, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x13, X86_DECODE_CMD_ADC, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x14, X86_DECODE_CMD_ADC, 1, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x15, X86_DECODE_CMD_ADC, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    \n+    {0x16, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n+    {0x17, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n+    \n+    {0x18, X86_DECODE_CMD_SBB, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x19, X86_DECODE_CMD_SBB, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x1a, X86_DECODE_CMD_SBB, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x1b, X86_DECODE_CMD_SBB, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x1c, X86_DECODE_CMD_SBB, 1, false, decode_rax, decode_imm8, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x1d, X86_DECODE_CMD_SBB, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, 
RFLAGS_MASK_OSZAPC},\n+    \n+    {0x1e, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n+    {0x1f, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n+    \n+    {0x20, X86_DECODE_CMD_AND, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x21, X86_DECODE_CMD_AND, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x22, X86_DECODE_CMD_AND, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x23, X86_DECODE_CMD_AND, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x24, X86_DECODE_CMD_AND, 1, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x25, X86_DECODE_CMD_AND, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x28, X86_DECODE_CMD_SUB, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x29, X86_DECODE_CMD_SUB, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x2a, X86_DECODE_CMD_SUB, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x2b, X86_DECODE_CMD_SUB, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x2c, X86_DECODE_CMD_SUB, 1, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x2d, X86_DECODE_CMD_SUB, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x2f, X86_DECODE_CMD_DAS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x30, X86_DECODE_CMD_XOR, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x31, X86_DECODE_CMD_XOR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x32, X86_DECODE_CMD_XOR, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x33, X86_DECODE_CMD_XOR, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x34, X86_DECODE_CMD_XOR, 1, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x35, X86_DECODE_CMD_XOR, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    \n+    {0x38, X86_DECODE_CMD_CMP, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x39, X86_DECODE_CMD_CMP, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x3a, X86_DECODE_CMD_CMP, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x3b, X86_DECODE_CMD_CMP, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x3c, X86_DECODE_CMD_CMP, 1, false, decode_rax, decode_imm8, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x3d, X86_DECODE_CMD_CMP, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    \n+    {0x3f, X86_DECODE_CMD_AAS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    \n+    {0x40, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+    {0x41, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+    {0x42, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+    {0x43, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, 
decode_incgroup, RFLAGS_MASK_OSZAPC},\n+    {0x44, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+    {0x45, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+    {0x46, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+    {0x47, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+    \n+    {0x48, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+    {0x49, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+    {0x4a, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+    {0x4b, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+    {0x4c, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+    {0x4d, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+    {0x4e, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+    {0x4f, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+    \n+    {0x50, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+    {0x51, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+    {0x52, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+    {0x53, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+    {0x54, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+    {0x55, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+    {0x56, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+    {0x57, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+    \n+    {0x58, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+    {0x59, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+    {0x5a, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+    {0x5b, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+    {0x5c, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+    {0x5d, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+    {0x5e, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+    {0x5f, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+    \n+    {0x60, X86_DECODE_CMD_PUSHA, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x61, X86_DECODE_CMD_POPA, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0x68, X86_DECODE_CMD_PUSH, 0, false, decode_imm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x6a, X86_DECODE_CMD_PUSH, 0, false, decode_imm8_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x69, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg, decode_modrm_rm, decode_imm, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x6b, X86_DECODE_CMD_IMUL_3, 0, 
true, decode_modrm_reg, decode_modrm_rm, decode_imm8_signed, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    \n+    {0x6c, X86_DECODE_CMD_INS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x6d, X86_DECODE_CMD_INS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x6e, X86_DECODE_CMD_OUTS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x6f, X86_DECODE_CMD_OUTS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0x70, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x71, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x72, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x73, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x74, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x75, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x76, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x77, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x78, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x79, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x7a, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x7b, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x7c, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x7d, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x7e, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x7f, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    \n+    {0x80, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8, NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},\n+    {0x81, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm, NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},\n+    {0x82, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8, NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},\n+    {0x83, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8_signed, NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},\n+    {0x84, X86_DECODE_CMD_TST, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x85, X86_DECODE_CMD_TST, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0x86, X86_DECODE_CMD_XCHG, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x87, X86_DECODE_CMD_XCHG, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x88, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x89, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x8a, X86_DECODE_CMD_MOV, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x8b, X86_DECODE_CMD_MOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x8c, X86_DECODE_CMD_MOV_FROM_SEG, 0, true, decode_modrm_rm, decode_modrm_reg, 
NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x8d, X86_DECODE_CMD_LEA, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x8e, X86_DECODE_CMD_MOV_TO_SEG, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x8f, X86_DECODE_CMD_POP, 0, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0x90, X86_DECODE_CMD_NOP, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x91, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+    {0x92, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+    {0x93, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+    {0x94, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+    {0x95, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+    {0x96, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+    {0x97, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+    \n+    {0x98, X86_DECODE_CMD_CBW, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x99, X86_DECODE_CMD_CWD, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0x9a, X86_DECODE_CMD_CALL_FAR, 0, false, NULL, NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE},\n+    \n+    {0x9c, X86_DECODE_CMD_PUSHF, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    //{0x9d, X86_DECODE_CMD_POPF, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_POPF},\n+    {0x9e, X86_DECODE_CMD_SAHF, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x9f, X86_DECODE_CMD_LAHF, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_LAHF},\n+    \n+    {0xa0, X86_DECODE_CMD_MOV, 1, false, decode_rax, fetch_moffs, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xa1, X86_DECODE_CMD_MOV, 0, false, decode_rax, fetch_moffs, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xa2, X86_DECODE_CMD_MOV, 1, false, fetch_moffs, decode_rax, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xa3, X86_DECODE_CMD_MOV, 0, false, fetch_moffs, decode_rax, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xa4, X86_DECODE_CMD_MOVS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xa5, X86_DECODE_CMD_MOVS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xa6, X86_DECODE_CMD_CMPS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xa7, X86_DECODE_CMD_CMPS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xaa, X86_DECODE_CMD_STOS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xab, X86_DECODE_CMD_STOS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xac, X86_DECODE_CMD_LODS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xad, X86_DECODE_CMD_LODS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xae, X86_DECODE_CMD_SCAS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xaf, X86_DECODE_CMD_SCAS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    \n+    {0xa8, X86_DECODE_CMD_TST, 1, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xa9, X86_DECODE_CMD_TST, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    \n+    {0xb0, 
X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+    {0xb1, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+    {0xb2, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+    {0xb3, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+    {0xb4, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+    {0xb5, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+    {0xb6, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+    {0xb7, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+    \n+    {0xb8, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+    {0xb9, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+    {0xba, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+    {0xbb, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+    {0xbc, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+    {0xbd, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+    {0xbe, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+    {0xbf, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+    \n+    {0xc0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8, NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n+    {0xc1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8, NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n+    \n+    {0xc2, X86_DECODE_RET_NEAR, 0, false, decode_imm16, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xc3, X86_DECODE_RET_NEAR, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xc4, X86_DECODE_CMD_LES, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xc5, X86_DECODE_CMD_LDS, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xc6, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_imm8, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xc7, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xc8, X86_DECODE_CMD_ENTER, 0, false, decode_imm16, decode_imm8, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xc9, X86_DECODE_CMD_LEAVE, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xca, X86_DECODE_RET_FAR, 0, false, decode_imm16, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xcb, X86_DECODE_RET_FAR, 0, false, decode_imm_0, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xcd, X86_DECODE_CMD_INT, 0, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    //{0xcf, X86_DECODE_CMD_IRET, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IRET},\n+    \n+    {0xd0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm_1, NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n+    {0xd1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm_1, NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n+    {0xd2, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_rcx, NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n+    
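/* 0xd2/0xd3: rotate/shift group, shift count taken from CL (decode_rcx) */\n+    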
{0xd3, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_rcx, NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n+    \n+    {0xd4, X86_DECODE_CMD_AAM, 0, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xd5, X86_DECODE_CMD_AAD, 0, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    \n+    {0xd7, X86_DECODE_CMD_XLAT, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xd8, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+    {0xd9, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+    {0xda, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+    {0xdb, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+    {0xdc, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+    {0xdd, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+    {0xde, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+    {0xdf, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+    \n+    {0xe0, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xe1, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xe2, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xe3, X86_DECODE_CMD_JCXZ, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    \n+    {0xe4, X86_DECODE_CMD_IN, 1, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xe5, X86_DECODE_CMD_IN, 0, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xe6, X86_DECODE_CMD_OUT, 1, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xe7, X86_DECODE_CMD_OUT, 0, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xe8, X86_DECODE_CMD_CALL_NEAR, 0, false, decode_imm_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xe9, X86_DECODE_CMD_JMP_NEAR, 0, false, decode_imm_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xea, X86_DECODE_CMD_JMP_FAR, 0, false, NULL, NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE},\n+    {0xeb, X86_DECODE_CMD_JMP_NEAR, 1, false, decode_imm8_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xec, X86_DECODE_CMD_IN, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xed, X86_DECODE_CMD_IN, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xee, X86_DECODE_CMD_OUT, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xef, X86_DECODE_CMD_OUT, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xf4, X86_DECODE_CMD_HLT, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xf5, X86_DECODE_CMD_CMC, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},\n+    \n+    {0xf6, X86_DECODE_CMD_INVL, 1, true, NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC},\n+    {0xf7, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC},\n+    \n+    {0xf8, X86_DECODE_CMD_CLC, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},\n+    {0xf9, X86_DECODE_CMD_STC, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},\n+    \n+    {0xfa, X86_DECODE_CMD_CLI, 0, false, NULL, NULL, NULL, 
NULL, NULL, RFLAGS_MASK_IF},\n+    {0xfb, X86_DECODE_CMD_STI, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF},\n+    {0xfc, X86_DECODE_CMD_CLD, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF},\n+    {0xfd, X86_DECODE_CMD_STD, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF},\n+    {0xfe, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, NULL, NULL, NULL, decode_incgroup2, RFLAGS_MASK_OSZAPC},\n+    {0xff, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, NULL, NULL, NULL, decode_ffgroup, RFLAGS_MASK_OSZAPC},\n+};\n+\n+struct decode_tbl _2op_inst[] =\n+{\n+    {0x0, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, NULL, NULL, NULL, decode_sldtgroup, RFLAGS_MASK_NONE},\n+    {0x1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, NULL, NULL, NULL, decode_lidtgroup, RFLAGS_MASK_NONE},\n+    {0x6, X86_DECODE_CMD_CLTS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_TF},\n+    {0x9, X86_DECODE_CMD_WBINVD, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x18, X86_DECODE_CMD_PREFETCH, 0, true, NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE},\n+    {0x1f, X86_DECODE_CMD_NOP, 0, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x20, X86_DECODE_CMD_MOV_FROM_CR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x21, X86_DECODE_CMD_MOV_FROM_DR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x22, X86_DECODE_CMD_MOV_TO_CR, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x23, X86_DECODE_CMD_MOV_TO_DR, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x30, X86_DECODE_CMD_WRMSR, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x31, X86_DECODE_CMD_RDTSC, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x32, X86_DECODE_CMD_RDMSR, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x40, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x41, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x42, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x43, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x44, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x45, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x46, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x47, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x48, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x49, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x4a, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x4b, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x4c, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x4d, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, 
NULL, RFLAGS_MASK_NONE},\n+    {0x4e, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x4f, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x77, X86_DECODE_CMD_EMMS, 0, false, NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE},\n+    {0x82, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x83, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x84, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x85, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x86, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x87, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x88, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x89, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x8a, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x8b, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x8c, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x8d, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x8e, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x8f, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+    {0x90, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x91, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x92, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x93, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x94, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x95, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x96, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x97, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x98, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x99, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x9a, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x9b, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x9c, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x9d, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x9e, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0x9f, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xb0, X86_DECODE_CMD_CMPXCHG, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xb1, 
X86_DECODE_CMD_CMPXCHG, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xb6, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xb7, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xb8, X86_DECODE_CMD_POPCNT, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xbe, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xbf, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xa0, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n+    {0xa1, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n+    {0xa2, X86_DECODE_CMD_CPUID, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xa3, X86_DECODE_CMD_BT, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_CF},\n+    {0xa4, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg, decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xa5, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg, decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xa8, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n+    {0xa9, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n+    {0xab, X86_DECODE_CMD_BTS, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_CF},\n+    {0xac, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg, decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xad, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg, decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    \n+    {0xae, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, NULL, NULL, NULL, decode_aegroup, RFLAGS_MASK_NONE},\n+    \n+    {0xaf, X86_DECODE_CMD_IMUL_2, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xb2, X86_DECODE_CMD_LSS, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xb3, X86_DECODE_CMD_BTR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xba, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8, NULL, NULL, decode_btgroup, RFLAGS_MASK_OSZAPC},\n+    {0xbb, X86_DECODE_CMD_BTC, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xbc, X86_DECODE_CMD_BSF, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    {0xbd, X86_DECODE_CMD_BSR, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    \n+    {0xc1, X86_DECODE_CMD_XADD, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+    \n+    {0xc7, X86_DECODE_CMD_CMPXCHG8B, 0, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_ZF},\n+    \n+    {0xc8, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+    {0xc9, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+    {0xca, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+    {0xcb, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+    {0xcc, 
X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+    {0xcd, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+    {0xce, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+    {0xcf, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+};\n+\n+struct decode_x87_tbl invl_inst_x87 = {0x0, 0, 0, 0, 0, false, false, NULL, NULL, decode_invalid, 0};\n+\n+struct decode_x87_tbl _x87_inst[] =\n+{\n+    {0xd8, 0, 3, X86_DECODE_CMD_FADD, 10, false, false, decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xd8, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+    {0xd8, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false, decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xd8, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+    {0xd8, 4, 3, X86_DECODE_CMD_FSUB, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xd8, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+    {0xd8, 5, 3, X86_DECODE_CMD_FSUB, 10, true, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xd8, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+    {0xd8, 6, 3, X86_DECODE_CMD_FDIV, 10, false, false, decode_x87_modrm_st0,decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xd8, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+    {0xd8, 7, 3, X86_DECODE_CMD_FDIV, 10, true, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xd8, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xd9, 0, 3, X86_DECODE_CMD_FLD, 10, false, false, decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xd9, 0, 0, X86_DECODE_CMD_FLD, 4, false, false, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xd9, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xd9, 1, 0, X86_DECODE_CMD_INVL, 10, false, false, decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xd9, 2, 3, X86_DECODE_CMD_INVL, 10, false, false, decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xd9, 2, 0, X86_DECODE_CMD_FST, 4, false, false, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xd9, 3, 3, X86_DECODE_CMD_INVL, 10, false, false, decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xd9, 3, 0, X86_DECODE_CMD_FST, 4, false, true, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xd9, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, decode_x87_modrm_st0, NULL, decode_d9_4, RFLAGS_MASK_NONE},\n+    {0xd9, 4, 0, X86_DECODE_CMD_INVL, 4, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xd9, 5, 3, X86_DECODE_CMD_FLDxx, 10, false, false, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xd9, 5, 0, X86_DECODE_CMD_FLDCW, 2, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+    //\n+    {0xd9, 7, 3, X86_DECODE_CMD_FNSTCW, 2, false, false, decode_x87_modrm_bytep, NULL, NULL, 
RFLAGS_MASK_NONE},\n+    {0xd9, 7, 0, X86_DECODE_CMD_FNSTCW, 2, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xda, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 6, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 7, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xda, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xdb, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdb, 0, 0, X86_DECODE_CMD_FLD, 4, false, false, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdb, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdb, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdb, 2, 0, X86_DECODE_CMD_FST, 4, false, false, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdb, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdb, 3, 0, X86_DECODE_CMD_FST, 4, false, true, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdb, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, decode_db_4, RFLAGS_MASK_NONE},\n+    {0xdb, 4, 0, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdb, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdb, 5, 0, X86_DECODE_CMD_FLD, 10, false, false, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdb, 7, 0, X86_DECODE_CMD_FST, 10, false, true, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xdc, 0, 3, X86_DECODE_CMD_FADD, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdc, 0, 0, X86_DECODE_CMD_FADD, 8, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+    {0xdc, 1, 3, X86_DECODE_CMD_FMUL, 10, false, 
false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdc, 1, 0, X86_DECODE_CMD_FMUL, 8, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+    {0xdc, 4, 3, X86_DECODE_CMD_FSUB, 10, true, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdc, 4, 0, X86_DECODE_CMD_FSUB, 8, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+    {0xdc, 5, 3, X86_DECODE_CMD_FSUB, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdc, 5, 0, X86_DECODE_CMD_FSUB, 8, true, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+    {0xdc, 6, 3, X86_DECODE_CMD_FDIV, 10, true, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdc, 6, 0, X86_DECODE_CMD_FDIV, 8, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+    {0xdc, 7, 3, X86_DECODE_CMD_FDIV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdc, 7, 0, X86_DECODE_CMD_FDIV, 8, true, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xdd, 0, 0, X86_DECODE_CMD_FLD, 8, false, false, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdd, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdd, 2, 3, X86_DECODE_CMD_FST, 10, false, false, decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdd, 2, 0, X86_DECODE_CMD_FST, 8, false, false, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdd, 3, 3, X86_DECODE_CMD_FST, 10, false, true, decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdd, 3, 0, X86_DECODE_CMD_FST, 8, false, true, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdd, 4, 3, X86_DECODE_CMD_FUCOM, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdd, 4, 0, X86_DECODE_CMD_FRSTOR, 8, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdd, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdd, 7, 0, X86_DECODE_CMD_FNSTSW, 0, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdd, 7, 3, X86_DECODE_CMD_FNSTSW, 0, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xde, 0, 3, X86_DECODE_CMD_FADD, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xde, 0, 0, X86_DECODE_CMD_FADD, 2, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+    {0xde, 1, 3, X86_DECODE_CMD_FMUL, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xde, 1, 0, X86_DECODE_CMD_FMUL, 2, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+    {0xde, 4, 3, X86_DECODE_CMD_FSUB, 10, true, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xde, 4, 0, X86_DECODE_CMD_FSUB, 2, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+    {0xde, 5, 3, X86_DECODE_CMD_FSUB, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xde, 5, 0, X86_DECODE_CMD_FSUB, 2, true, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+    {0xde, 6, 3, 
X86_DECODE_CMD_FDIV, 10, true, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xde, 6, 0, X86_DECODE_CMD_FDIV, 2, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+    {0xde, 7, 3, X86_DECODE_CMD_FDIV, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xde, 7, 0, X86_DECODE_CMD_FDIV, 2, true, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+    \n+    {0xdf, 0, 0, X86_DECODE_CMD_FLD, 2, false, false, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdf, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdf, 2, 3, X86_DECODE_CMD_FST, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdf, 2, 0, X86_DECODE_CMD_FST, 2, false, false, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdf, 3, 3, X86_DECODE_CMD_FST, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdf, 3, 0, X86_DECODE_CMD_FST, 2, false, true, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdf, 4, 3, X86_DECODE_CMD_FNSTSW, 2, false, true, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdf, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+    {0xdf, 5, 0, X86_DECODE_CMD_FLD, 8, false, false, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+    {0xdf, 7, 0, X86_DECODE_CMD_FST, 8, false, true, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+};\n+\n+void calc_modrm_operand16(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    addr_t ptr = 0;\n+    x86_reg_segment seg = REG_SEG_DS;\n+\n+    if (!decode->modrm.mod && 6 == decode->modrm.rm) {\n+        op->ptr = (uint16_t)decode->displacement;\n+        goto calc_addr;\n+    }\n+\n+    if (decode->displacement_size)\n+        ptr = sign(decode->displacement, decode->displacement_size);\n+\n+    switch (decode->modrm.rm) {\n+        case 0:\n+            ptr += BX(cpu) + SI(cpu);\n+            break;\n+        case 1:\n+            ptr += BX(cpu) + DI(cpu);\n+            break;\n+        case 2:\n+            ptr += BP(cpu) + SI(cpu);\n+            seg = REG_SEG_SS;\n+            break;\n+        case 3:\n+            ptr += BP(cpu) + DI(cpu);\n+            seg = REG_SEG_SS;\n+            break;\n+        case 4:\n+            ptr += SI(cpu);\n+            break;\n+        case 5:\n+            ptr += DI(cpu);\n+            break;\n+        case 6:\n+            ptr += BP(cpu);\n+            seg = REG_SEG_SS;\n+            break;\n+        case 7:\n+            ptr += BX(cpu);\n+            break;\n+    }\n+calc_addr:\n+    if (X86_DECODE_CMD_LEA == decode->cmd)\n+        op->ptr = (uint16_t)ptr;\n+    else\n+        op->ptr = decode_linear_addr(cpu, decode, (uint16_t)ptr, seg);\n+}\n+\n+addr_t get_reg_ref(CPUState *cpu, int reg, int is_extended, int size)\n+{\n+    addr_t ptr = 0;\n+    int which = 0;\n+\n+    if (is_extended)\n+        reg |= REG_R8;\n+\n+\n+    switch (size) {\n+        case 1:\n+            if (is_extended || reg < 4) {\n+                which = 1;\n+                ptr = (addr_t)&RL(cpu, reg);\n+            } else {\n+                which = 2;\n+                ptr = (addr_t)&RH(cpu, reg - 4);\n+            }\n+            break;\n+        default:\n+            which = 3;\n+            ptr = 
(addr_t)&RRX(cpu, reg);\n+            break;\n+    }\n+    return ptr;\n+}\n+\n+addr_t get_reg_val(CPUState *cpu, int reg, int is_extended, int size)\n+{\n+    addr_t val = 0;\n+    memcpy(&val, (void*)get_reg_ref(cpu, reg, is_extended, size), size);\n+    return val;\n+}\n+\n+static addr_t get_sib_val(CPUState *cpu, struct x86_decode *decode, x86_reg_segment *sel)\n+{\n+    addr_t base = 0;\n+    addr_t scaled_index = 0;\n+    int addr_size = decode->addressing_size;\n+    int base_reg = decode->sib.base;\n+    int index_reg = decode->sib.index;\n+\n+    *sel = REG_SEG_DS;\n+\n+    if (decode->modrm.mod || base_reg != REG_RBP) {\n+        if (decode->rex.b)\n+            base_reg |= REG_R8;\n+        if (REG_RSP == base_reg || REG_RBP == base_reg)\n+            *sel = REG_SEG_SS;\n+        base = get_reg_val(cpu, decode->sib.base, decode->rex.b, addr_size);\n+    }\n+\n+    if (decode->rex.x)\n+        index_reg |= REG_R8;\n+\n+    if (index_reg != REG_RSP)\n+        scaled_index = get_reg_val(cpu, index_reg, decode->rex.x, addr_size) << decode->sib.scale;\n+    return base + scaled_index;\n+}\n+\n+void calc_modrm_operand32(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    x86_reg_segment seg = REG_SEG_DS;\n+    addr_t ptr = 0;\n+    int addr_size = decode->addressing_size;\n+\n+    if (decode->displacement_size)\n+        ptr = sign(decode->displacement, decode->displacement_size);\n+\n+    if (4 == decode->modrm.rm) {\n+        ptr += get_sib_val(cpu, decode, &seg);\n+    }\n+    else if (!decode->modrm.mod && 5 == decode->modrm.rm) {\n+        if (x86_is_long_mode(cpu))\n+            ptr += RIP(cpu) + decode->len;\n+        else\n+            ptr = decode->displacement;\n+    }\n+    else {\n+        if (REG_RBP == decode->modrm.rm || REG_RSP == decode->modrm.rm)\n+            seg = REG_SEG_SS;\n+        ptr += get_reg_val(cpu, decode->modrm.rm, decode->rex.b, addr_size);\n+    }\n+\n+    if (X86_DECODE_CMD_LEA == decode->cmd)\n+        op->ptr = (uint32_t)ptr;\n+    else\n+        op->ptr = decode_linear_addr(cpu, decode, (uint32_t)ptr, seg);\n+}\n+\n+void calc_modrm_operand64(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    x86_reg_segment seg = REG_SEG_DS;\n+    int32_t offset = 0;\n+    int mod = decode->modrm.mod;\n+    int rm = decode->modrm.rm;\n+    addr_t ptr;\n+    int src = decode->modrm.rm;\n+    \n+    if (decode->displacement_size)\n+        offset = sign(decode->displacement, decode->displacement_size);\n+\n+    if (4 == rm)\n+        ptr = get_sib_val(cpu, decode, &seg) + offset;\n+    else if (0 == mod && 5 == rm)\n+        ptr = RIP(cpu) + decode->len + (int32_t) offset;\n+    else\n+        ptr = get_reg_val(cpu, src, decode->rex.b, 8) + (int64_t) offset;\n+    \n+    if (X86_DECODE_CMD_LEA == decode->cmd)\n+        op->ptr = ptr;\n+    else\n+        op->ptr = decode_linear_addr(cpu, decode, ptr, seg);\n+}\n+\n+\n+void calc_modrm_operand(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+{\n+    if (3 == decode->modrm.mod) {\n+        op->reg = decode->modrm.reg;\n+        op->type = X86_VAR_REG;\n+        op->ptr = get_reg_ref(cpu, decode->modrm.rm, decode->rex.b, decode->operand_size);\n+        return;\n+    }\n+\n+    switch (decode->addressing_size) {\n+        case 2:\n+            calc_modrm_operand16(cpu, decode, op);\n+            break;\n+        case 4:\n+            calc_modrm_operand32(cpu, decode, op);\n+            break;\n+        case 8:\n+            
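/* 64-bit effective address: SIB and RIP-relative forms */\n+            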
calc_modrm_operand64(cpu, decode, op);\n+            break;\n+        default:\n+            VM_PANIC_EX(\"unsupported address size %d\\n\", decode->addressing_size);\n+            break;\n+    }\n+}\n+\n+static void decode_prefix(CPUState *cpu, struct x86_decode *decode)\n+{\n+    while (1) {\n+        uint8_t byte = decode_byte(cpu, decode);\n+        switch (byte) {\n+            case PREFIX_LOCK:\n+                decode->lock = byte;\n+                break;\n+            case PREFIX_REPN:\n+            case PREFIX_REP:\n+                decode->rep = byte;\n+                break;\n+            case PREFIX_CS_SEG_OVEERIDE:\n+            case PREFIX_SS_SEG_OVEERIDE:\n+            case PREFIX_DS_SEG_OVEERIDE:\n+            case PREFIX_ES_SEG_OVEERIDE:\n+            case PREFIX_FS_SEG_OVEERIDE:\n+            case PREFIX_GS_SEG_OVEERIDE:\n+                decode->segment_override = byte;\n+                break;\n+            case PREFIX_OP_SIZE_OVERRIDE:\n+                decode->op_size_override = byte;\n+                break;\n+            case PREFIX_ADDR_SIZE_OVERRIDE:\n+                decode->addr_size_override = byte;\n+                break;\n+            case PREFIX_REX ... (PREFIX_REX + 0xf):\n+                if (x86_is_long_mode(cpu)) {\n+                    decode->rex.rex = byte;\n+                    break;\n+                }\n+                // fall through when not in long mode\n+            default:\n+                decode->len--;\n+                return;\n+        }\n+    }\n+}\n+\n+void set_addressing_size(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->addressing_size = -1;\n+    if (x86_is_real(cpu) || x86_is_v8086(cpu)) {\n+        if (decode->addr_size_override)\n+            decode->addressing_size = 4;\n+        else\n+            decode->addressing_size = 2;\n+    }\n+    else if (!x86_is_long_mode(cpu)) {\n+        // protected\n+        struct vmx_segment cs;\n+        vmx_read_segment_descriptor(cpu, &cs, REG_SEG_CS);\n+        // check db\n+        if ((cs.ar >> 14) & 1) {\n+            if (decode->addr_size_override)\n+                decode->addressing_size = 2;\n+            else\n+                decode->addressing_size = 4;\n+        } else {\n+            if (decode->addr_size_override)\n+                decode->addressing_size = 4;\n+            else\n+                decode->addressing_size = 2;\n+        }\n+    } else {\n+        // long\n+        if (decode->addr_size_override)\n+            decode->addressing_size = 4;\n+        else\n+            decode->addressing_size = 8;\n+    }\n+}\n+\n+void set_operand_size(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->operand_size = -1;\n+    if (x86_is_real(cpu) || x86_is_v8086(cpu)) {\n+        if (decode->op_size_override)\n+            decode->operand_size = 4;\n+        else\n+            decode->operand_size = 2;\n+    }\n+    else if (!x86_is_long_mode(cpu)) {\n+        // protected\n+        struct vmx_segment cs;\n+        vmx_read_segment_descriptor(cpu, &cs, REG_SEG_CS);\n+        // check db\n+        if ((cs.ar >> 14) & 1) {\n+            if (decode->op_size_override)\n+                decode->operand_size = 2;\n+            else\n+                decode->operand_size = 4;\n+        } else {\n+            if (decode->op_size_override)\n+                decode->operand_size = 4;\n+            else\n+                decode->operand_size = 2;\n+        }\n+    } else {\n+        // long\n+        if (decode->op_size_override)\n+            decode->operand_size = 
2;\n+        else\n+            decode->operand_size = 4;\n+\n+        if (decode->rex.w)\n+            decode->operand_size = 8;\n+    }\n+}\n+\n+static void decode_sib(CPUState *cpu, struct x86_decode *decode)\n+{\n+    if ((decode->modrm.mod != 3) && (4 == decode->modrm.rm) && (decode->addressing_size != 2)) {\n+        decode->sib.sib = decode_byte(cpu, decode);\n+        decode->sib_present = true;\n+    }\n+}\n+\n+/* 16 bit modrm\n+ * mod                               R/M\n+ * 00\t[BX+SI]         [BX+DI]         [BP+SI]         [BP+DI]         [SI]        [DI]        [disp16]\t[BX]\n+ * 01\t[BX+SI+disp8]\t[BX+DI+disp8]\t[BP+SI+disp8]\t[BP+DI+disp8]\t[SI+disp8]\t[DI+disp8]\t[BP+disp8]\t[BX+disp8]\n+ * 10\t[BX+SI+disp16]\t[BX+DI+disp16]\t[BP+SI+disp16]\t[BP+DI+disp16]\t[SI+disp16]\t[DI+disp16]\t[BP+disp16]\t[BX+disp16]\n+ * 11     -               -              -                -               -          -            -          -\n+ */\n+int disp16_tbl[4][8] =\n+    {{0, 0, 0, 0, 0, 0, 2, 0},\n+    {1, 1, 1, 1, 1, 1, 1, 1},\n+    {2, 2, 2, 2, 2, 2, 2, 2},\n+    {0, 0, 0, 0, 0, 0, 0, 0}};\n+\n+/*\n+ 32/64-bit\t modrm\n+ Mod\n+ 00     [r/m]        [r/m]        [r/m]        [r/m]        [SIB]        [RIP/EIP1,2+disp32]   [r/m]         [r/m]\n+ 01     [r/m+disp8]  [r/m+disp8]  [r/m+disp8]  [r/m+disp8]  [SIB+disp8]  [r/m+disp8]           [SIB+disp8]   [r/m+disp8]\n+ 10     [r/m+disp32] [r/m+disp32] [r/m+disp32] [r/m+disp32] [SIB+disp32] [r/m+disp32]          [SIB+disp32]\t [r/m+disp32]\n+ 11     -            -             -           -            -            -                      -             -\n+ */\n+int disp32_tbl[4][8] =\n+    {{0, 0, 0, 0, -1, 4, 0, 0},\n+    {1, 1, 1, 1, 1, 1, 1, 1},\n+    {4, 4, 4, 4, 4, 4, 4, 4},\n+    {0, 0, 0, 0, 0, 0, 0, 0}};\n+\n+static inline void decode_displacement(CPUState *cpu, struct x86_decode *decode)\n+{\n+    int addressing_size = decode->addressing_size;\n+    int mod = decode->modrm.mod;\n+    int rm = decode->modrm.rm;\n+    \n+    decode->displacement_size = 0;\n+    switch (addressing_size) {\n+        case 2:\n+            decode->displacement_size = disp16_tbl[mod][rm];\n+            if (decode->displacement_size)\n+                decode->displacement = (uint16_t)decode_bytes(cpu, decode, decode->displacement_size);\n+            break;\n+        case 4:\n+        case 8:\n+            if (-1 == disp32_tbl[mod][rm]) {\n+                if (5 == decode->sib.base)\n+                    decode->displacement_size = 4;\n+            }\n+            else\n+                decode->displacement_size = disp32_tbl[mod][rm];\n+            \n+            if (decode->displacement_size)\n+                decode->displacement = (uint32_t)decode_bytes(cpu, decode, decode->displacement_size);\n+            break;\n+    }\n+}\n+\n+static inline void decode_modrm(CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->modrm.modrm = decode_byte(cpu, decode);\n+    decode->is_modrm = true;\n+    \n+    decode_sib(cpu, decode);\n+    decode_displacement(cpu, decode);\n+}\n+\n+static inline void decode_opcode_general(CPUState *cpu, struct x86_decode *decode, uint8_t opcode, struct decode_tbl *inst_decoder)\n+{\n+    decode->cmd = inst_decoder->cmd;\n+    if (inst_decoder->operand_size)\n+        decode->operand_size = inst_decoder->operand_size;\n+    decode->flags_mask = inst_decoder->flags_mask;\n+    \n+    if (inst_decoder->is_modrm)\n+        decode_modrm(cpu, decode);\n+    if (inst_decoder->decode_op1)\n+        inst_decoder->decode_op1(cpu, decode, 
&decode->op[0]);\n+    if (inst_decoder->decode_op2)\n+        inst_decoder->decode_op2(cpu, decode, &decode->op[1]);\n+    if (inst_decoder->decode_op3)\n+        inst_decoder->decode_op3(cpu, decode, &decode->op[2]);\n+    if (inst_decoder->decode_op4)\n+        inst_decoder->decode_op4(cpu, decode, &decode->op[3]);\n+    if (inst_decoder->decode_postfix)\n+        inst_decoder->decode_postfix(cpu, decode);\n+}\n+\n+static inline void decode_opcode_1(CPUState *cpu, struct x86_decode *decode, uint8_t opcode)\n+{\n+    struct decode_tbl *inst_decoder = &_decode_tbl1[opcode];\n+    decode_opcode_general(cpu, decode, opcode, inst_decoder);\n+}\n+\n+\n+static inline void decode_opcode_2(CPUState *cpu, struct x86_decode *decode, uint8_t opcode)\n+{\n+    struct decode_tbl *inst_decoder = &_decode_tbl2[opcode];\n+    decode_opcode_general(cpu, decode, opcode, inst_decoder);\n+}\n+\n+static void decode_opcodes(CPUState *cpu, struct x86_decode *decode)\n+{\n+    uint8_t opcode;\n+    \n+    opcode = decode_byte(cpu, decode);\n+    decode->opcode[decode->opcode_len++] = opcode;\n+    if (opcode != OPCODE_ESCAPE) {\n+        decode_opcode_1(cpu, decode, opcode);\n+    } else {\n+        opcode = decode_byte(cpu, decode);\n+        decode->opcode[decode->opcode_len++] = opcode;\n+        decode_opcode_2(cpu, decode, opcode);\n+    }\n+}\n+\n+uint32_t decode_instruction(CPUState *cpu, struct x86_decode *decode)\n+{\n+    ZERO_INIT(*decode);\n+\n+    decode_prefix(cpu, decode);\n+    set_addressing_size(cpu, decode);\n+    set_operand_size(cpu, decode);\n+\n+    decode_opcodes(cpu, decode);\n+    \n+    return decode->len;\n+}\n+\n+void init_decoder(CPUState *cpu)\n+{\n+    int i;\n+    \n+    for (i = 0; i < ARRAY_SIZE(_decode_tbl1); i++)\n+        memcpy(&_decode_tbl1[i], &invl_inst, sizeof(invl_inst));\n+    for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++)\n+        memcpy(&_decode_tbl2[i], &invl_inst, sizeof(invl_inst));\n+    for (i = 0; i < ARRAY_SIZE(_decode_tbl3); i++)\n+        memcpy(&_decode_tbl3[i], &invl_inst_x87, sizeof(invl_inst_x87));\n+    \n+    for (i = 0; i < ARRAY_SIZE(_1op_inst); i++) {\n+        _decode_tbl1[_1op_inst[i].opcode] = _1op_inst[i];\n+    }\n+    for (i = 0; i < ARRAY_SIZE(_2op_inst); i++) {\n+        _decode_tbl2[_2op_inst[i].opcode] = _2op_inst[i];\n+    }\n+    for (i = 0; i < ARRAY_SIZE(_x87_inst); i++) {\n+        int index = ((_x87_inst[i].opcode & 0xf) << 4) | ((_x87_inst[i].modrm_mod & 1) << 3) | _x87_inst[i].modrm_reg;\n+        _decode_tbl3[index] = _x87_inst[i];\n+    }\n+}\n+\n+\n+const char *decode_cmd_to_string(enum x86_decode_cmd cmd)\n+{\n+    static const char *cmds[] = {\"INVL\", \"PUSH\", \"PUSH_SEG\", \"POP\", \"POP_SEG\", \"MOV\", \"MOVSX\", \"MOVZX\", \"CALL_NEAR\",\n+        \"CALL_NEAR_ABS_INDIRECT\", \"CALL_FAR_ABS_INDIRECT\", \"CALL_FAR\", \"RET_NEAR\", \"RET_FAR\", \"ADD\", \"OR\",\n+        \"ADC\", \"SBB\", \"AND\", \"SUB\", \"XOR\", \"CMP\", \"INC\", \"DEC\", \"TST\", \"NOT\", \"NEG\", \"JMP_NEAR\", \"JMP_NEAR_ABS_INDIRECT\",\n+        \"JMP_FAR\", \"JMP_FAR_ABS_INDIRECT\", \"LEA\", \"JXX\",\n+        \"JCXZ\", \"SETXX\", \"MOV_TO_SEG\", \"MOV_FROM_SEG\", \"CLI\", \"STI\", \"CLD\", \"STD\", \"STC\",\n+        \"CLC\", \"OUT\", \"IN\", \"INS\", \"OUTS\", \"LIDT\", \"SIDT\", \"LGDT\", \"SGDT\", \"SMSW\", \"LMSW\", \"RDTSCP\", \"INVLPG\", \"MOV_TO_CR\",\n+        \"MOV_FROM_CR\", \"MOV_TO_DR\", \"MOV_FROM_DR\", \"PUSHF\", \"POPF\", \"CPUID\", \"ROL\", \"ROR\", \"RCL\", \"RCR\", \"SHL\", \"SAL\",\n+        \"SHR\", \"SHRD\", \"SHLD\", \"SAR\", \"DIV\", 
\"IDIV\", \"MUL\", \"IMUL_3\", \"IMUL_2\", \"IMUL_1\", \"MOVS\", \"CMPS\", \"SCAS\",\n+        \"LODS\", \"STOS\", \"BSWAP\", \"XCHG\", \"RDTSC\", \"RDMSR\", \"WRMSR\", \"ENTER\", \"LEAVE\", \"BT\", \"BTS\", \"BTC\", \"BTR\", \"BSF\",\n+        \"BSR\", \"IRET\", \"INT\", \"POPA\", \"PUSHA\", \"CWD\", \"CBW\", \"DAS\", \"AAD\", \"AAM\", \"AAS\", \"LOOP\", \"SLDT\", \"STR\", \"LLDT\",\n+        \"LTR\", \"VERR\", \"VERW\", \"SAHF\", \"LAHF\", \"WBINVD\", \"LDS\", \"LSS\", \"LES\", \"LGS\", \"LFS\", \"CMC\", \"XLAT\", \"NOP\", \"CMOV\",\n+        \"CLTS\", \"XADD\", \"HLT\", \"CMPXCHG8B\", \"CMPXCHG\", \"POPCNT\",\n+        \"FNINIT\", \"FLD\", \"FLDxx\", \"FNSTCW\", \"FNSTSW\", \"FNSETPM\", \"FSAVE\", \"FRSTOR\", \"FXSAVE\", \"FXRSTOR\", \"FDIV\", \"FMUL\",\n+        \"FSUB\", \"FADD\", \"EMMS\", \"MFENCE\", \"SFENCE\", \"LFENCE\", \"PREFETCH\", \"CLFLUSH\", \"FST\", \"FABS\", \"FUCOM\", \"FUCOMI\", \"FLDCW\",\n+        \"FXCH\", \"FCHS\", \"FCMOV\", \"FRNDINT\", \"FXAM\", \"LAST\"};\n+    return cmds[cmd];\n+}\n+\n+addr_t decode_linear_addr(struct CPUState *cpu, struct x86_decode *decode, addr_t addr, x86_reg_segment seg)\n+{\n+    switch (decode->segment_override) {\n+        case PREFIX_CS_SEG_OVEERIDE:\n+            seg = REG_SEG_CS;\n+            break;\n+        case PREFIX_SS_SEG_OVEERIDE:\n+            seg = REG_SEG_SS;\n+            break;\n+        case PREFIX_DS_SEG_OVEERIDE:\n+            seg = REG_SEG_DS;\n+            break;\n+        case PREFIX_ES_SEG_OVEERIDE:\n+            seg = REG_SEG_ES;\n+            break;\n+        case PREFIX_FS_SEG_OVEERIDE:\n+            seg = REG_SEG_FS;\n+            break;\n+        case PREFIX_GS_SEG_OVEERIDE:\n+            seg = REG_SEG_GS;\n+            break;\n+        default:\n+            break;\n+    }\n+    return linear_addr_size(cpu, addr, decode->addressing_size, seg);\n+}\ndiff --git a/target/i386/hvf-utils/x86_decode.h b/target/i386/hvf-utils/x86_decode.h\nnew file mode 100644\nindex 0000000000..3a22d7d1a5\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_decode.h\n@@ -0,0 +1,314 @@\n+/*\n+ * Copyright (C) 2016 Veertu Inc,\n+ *\n+ * This program is free software; you can redistribute it and/or\n+ * modify it under the terms of the GNU General Public License as\n+ * published by the Free Software Foundation; either version 2 or\n+ * (at your option) version 3 of the License.\n+ *\n+ * This program is distributed in the hope that it will be useful,\n+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\n+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the\n+ * GNU General Public License for more details.\n+ *\n+ * You should have received a copy of the GNU General Public License along\n+ * with this program; if not, see <http://www.gnu.org/licenses/>.\n+ */\n+\n+#pragma once\n+\n+#include <sys/types.h>\n+#include <sys/ioctl.h>\n+#include <sys/mman.h>\n+#include <stdarg.h>\n+#include \"qemu-common.h\"\n+#include \"x86.h\"\n+\n+typedef enum x86_prefix {\n+    // group 1\n+    PREFIX_LOCK =                  0xf0,\n+    PREFIX_REPN =                  0xf2,\n+    PREFIX_REP =                   0xf3,\n+    // group 2\n+    PREFIX_CS_SEG_OVEERIDE =       0x2e,\n+    PREFIX_SS_SEG_OVEERIDE =       0x36,\n+    PREFIX_DS_SEG_OVEERIDE =       0x3e,\n+    PREFIX_ES_SEG_OVEERIDE =       0x26,\n+    PREFIX_FS_SEG_OVEERIDE =       0x64,\n+    PREFIX_GS_SEG_OVEERIDE =       0x65,\n+    // group 3\n+    PREFIX_OP_SIZE_OVERRIDE =      0x66,\n+    // group 4\n+    PREFIX_ADDR_SIZE_OVERRIDE =    0x67,\n+\n+    PREFIX_REX                   = 0x40,\n+} x86_prefix;\n+\n+enum x86_decode_cmd {\n+    X86_DECODE_CMD_INVL = 0,\n+    \n+    X86_DECODE_CMD_PUSH,\n+    X86_DECODE_CMD_PUSH_SEG,\n+    X86_DECODE_CMD_POP,\n+    X86_DECODE_CMD_POP_SEG,\n+    X86_DECODE_CMD_MOV,\n+    X86_DECODE_CMD_MOVSX,\n+    X86_DECODE_CMD_MOVZX,\n+    X86_DECODE_CMD_CALL_NEAR,\n+    X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT,\n+    X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT,\n+    X86_DECODE_CMD_CALL_FAR,\n+    X86_DECODE_RET_NEAR,\n+    X86_DECODE_RET_FAR,\n+    X86_DECODE_CMD_ADD,\n+    X86_DECODE_CMD_OR,\n+    X86_DECODE_CMD_ADC,\n+    X86_DECODE_CMD_SBB,\n+    X86_DECODE_CMD_AND,\n+    X86_DECODE_CMD_SUB,\n+    X86_DECODE_CMD_XOR,\n+    X86_DECODE_CMD_CMP,\n+    X86_DECODE_CMD_INC,\n+    X86_DECODE_CMD_DEC,\n+    X86_DECODE_CMD_TST,\n+    X86_DECODE_CMD_NOT,\n+    X86_DECODE_CMD_NEG,\n+    X86_DECODE_CMD_JMP_NEAR,\n+    X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT,\n+    X86_DECODE_CMD_JMP_FAR,\n+    X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT,\n+    X86_DECODE_CMD_LEA,\n+    X86_DECODE_CMD_JXX,\n+    X86_DECODE_CMD_JCXZ,\n+    X86_DECODE_CMD_SETXX,\n+    X86_DECODE_CMD_MOV_TO_SEG,\n+    X86_DECODE_CMD_MOV_FROM_SEG,\n+    X86_DECODE_CMD_CLI,\n+    X86_DECODE_CMD_STI,\n+    X86_DECODE_CMD_CLD,\n+    X86_DECODE_CMD_STD,\n+    X86_DECODE_CMD_STC,\n+    X86_DECODE_CMD_CLC,\n+    X86_DECODE_CMD_OUT,\n+    X86_DECODE_CMD_IN,\n+    X86_DECODE_CMD_INS,\n+    X86_DECODE_CMD_OUTS,\n+    X86_DECODE_CMD_LIDT,\n+    X86_DECODE_CMD_SIDT,\n+    X86_DECODE_CMD_LGDT,\n+    X86_DECODE_CMD_SGDT,\n+    X86_DECODE_CMD_SMSW,\n+    X86_DECODE_CMD_LMSW,\n+    X86_DECODE_CMD_RDTSCP,\n+    X86_DECODE_CMD_INVLPG,\n+    X86_DECODE_CMD_MOV_TO_CR,\n+    X86_DECODE_CMD_MOV_FROM_CR,\n+    X86_DECODE_CMD_MOV_TO_DR,\n+    X86_DECODE_CMD_MOV_FROM_DR,\n+    X86_DECODE_CMD_PUSHF,\n+    X86_DECODE_CMD_POPF,\n+    X86_DECODE_CMD_CPUID,\n+    X86_DECODE_CMD_ROL,\n+    X86_DECODE_CMD_ROR,\n+    X86_DECODE_CMD_RCL,\n+    X86_DECODE_CMD_RCR,\n+    X86_DECODE_CMD_SHL,\n+    X86_DECODE_CMD_SAL,\n+    X86_DECODE_CMD_SHR,\n+    X86_DECODE_CMD_SHRD,\n+    X86_DECODE_CMD_SHLD,\n+    X86_DECODE_CMD_SAR,\n+    X86_DECODE_CMD_DIV,\n+    X86_DECODE_CMD_IDIV,\n+    X86_DECODE_CMD_MUL,\n+    X86_DECODE_CMD_IMUL_3,\n+    X86_DECODE_CMD_IMUL_2,\n+    X86_DECODE_CMD_IMUL_1,\n+    X86_DECODE_CMD_MOVS,\n+    X86_DECODE_CMD_CMPS,\n+    X86_DECODE_CMD_SCAS,\n+    X86_DECODE_CMD_LODS,\n+    X86_DECODE_CMD_STOS,\n+    X86_DECODE_CMD_BSWAP,\n+    X86_DECODE_CMD_XCHG,\n+    X86_DECODE_CMD_RDTSC,\n+    X86_DECODE_CMD_RDMSR,\n+    X86_DECODE_CMD_WRMSR,\n+    
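/* editor's note: this enum must stay in lock-step with the cmds[] array in\n+       decode_cmd_to_string(); adding an entry here without the matching string\n+       shifts every later name. */\n+    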
X86_DECODE_CMD_ENTER,\n+    X86_DECODE_CMD_LEAVE,\n+    X86_DECODE_CMD_BT,\n+    X86_DECODE_CMD_BTS,\n+    X86_DECODE_CMD_BTC,\n+    X86_DECODE_CMD_BTR,\n+    X86_DECODE_CMD_BSF,\n+    X86_DECODE_CMD_BSR,\n+    X86_DECODE_CMD_IRET,\n+    X86_DECODE_CMD_INT,\n+    X86_DECODE_CMD_POPA,\n+    X86_DECODE_CMD_PUSHA,\n+    X86_DECODE_CMD_CWD,\n+    X86_DECODE_CMD_CBW,\n+    X86_DECODE_CMD_DAS,\n+    X86_DECODE_CMD_AAD,\n+    X86_DECODE_CMD_AAM,\n+    X86_DECODE_CMD_AAS,\n+    X86_DECODE_CMD_LOOP,\n+    X86_DECODE_CMD_SLDT,\n+    X86_DECODE_CMD_STR,\n+    X86_DECODE_CMD_LLDT,\n+    X86_DECODE_CMD_LTR,\n+    X86_DECODE_CMD_VERR,\n+    X86_DECODE_CMD_VERW,\n+    X86_DECODE_CMD_SAHF,\n+    X86_DECODE_CMD_LAHF,\n+    X86_DECODE_CMD_WBINVD,\n+    X86_DECODE_CMD_LDS,\n+    X86_DECODE_CMD_LSS,\n+    X86_DECODE_CMD_LES,\n+    X86_DECODE_XMD_LGS,\n+    X86_DECODE_CMD_LFS,\n+    X86_DECODE_CMD_CMC,\n+    X86_DECODE_CMD_XLAT,\n+    X86_DECODE_CMD_NOP,\n+    X86_DECODE_CMD_CMOV,\n+    X86_DECODE_CMD_CLTS,\n+    X86_DECODE_CMD_XADD,\n+    X86_DECODE_CMD_HLT,\n+    X86_DECODE_CMD_CMPXCHG8B,\n+    X86_DECODE_CMD_CMPXCHG,\n+    X86_DECODE_CMD_POPCNT,\n+    \n+    X86_DECODE_CMD_FNINIT,\n+    X86_DECODE_CMD_FLD,\n+    X86_DECODE_CMD_FLDxx,\n+    X86_DECODE_CMD_FNSTCW,\n+    X86_DECODE_CMD_FNSTSW,\n+    X86_DECODE_CMD_FNSETPM,\n+    X86_DECODE_CMD_FSAVE,\n+    X86_DECODE_CMD_FRSTOR,\n+    X86_DECODE_CMD_FXSAVE,\n+    X86_DECODE_CMD_FXRSTOR,\n+    X86_DECODE_CMD_FDIV,\n+    X86_DECODE_CMD_FMUL,\n+    X86_DECODE_CMD_FSUB,\n+    X86_DECODE_CMD_FADD,\n+    X86_DECODE_CMD_EMMS,\n+    X86_DECODE_CMD_MFENCE,\n+    X86_DECODE_CMD_SFENCE,\n+    X86_DECODE_CMD_LFENCE,\n+    X86_DECODE_CMD_PREFETCH,\n+    X86_DECODE_CMD_CLFLUSH,\n+    X86_DECODE_CMD_FST,\n+    X86_DECODE_CMD_FABS,\n+    X86_DECODE_CMD_FUCOM,\n+    X86_DECODE_CMD_FUCOMI,\n+    X86_DECODE_CMD_FLDCW,\n+    X86_DECODE_CMD_FXCH,\n+    X86_DECODE_CMD_FCHS,\n+    X86_DECODE_CMD_FCMOV,\n+    X86_DECODE_CMD_FRNDINT,\n+    X86_DECODE_CMD_FXAM,\n+\n+    X86_DECODE_CMD_LAST,\n+};\n+\n+const char *decode_cmd_to_string(enum x86_decode_cmd cmd);\n+\n+typedef struct x86_modrm {\n+    union {\n+        uint8_t modrm;\n+        struct {\n+            uint8_t rm:3;\n+            uint8_t reg:3;\n+            uint8_t mod:2;\n+        };\n+    };\n+} __attribute__ ((__packed__)) x86_modrm;\n+\n+typedef struct x86_sib {\n+    union {\n+        uint8_t sib;\n+        struct {\n+            uint8_t base:3;\n+            uint8_t index:3;\n+            uint8_t scale:2;\n+        };\n+    };\n+} __attribute__ ((__packed__)) x86_sib;\n+\n+typedef struct x86_rex {\n+    union {\n+        uint8_t rex;\n+        struct {\n+            uint8_t b:1;\n+            uint8_t x:1;\n+            uint8_t r:1;\n+            uint8_t w:1;\n+            uint8_t unused:4;\n+        };\n+    };\n+} __attribute__ ((__packed__)) x86_rex;\n+\n+typedef enum x86_var_type {\n+    X86_VAR_IMMEDIATE,\n+    X86_VAR_OFFSET,\n+    X86_VAR_REG,\n+    X86_VAR_RM,\n+\n+    // for floating point computations\n+    X87_VAR_REG,\n+    X87_VAR_FLOATP,\n+    X87_VAR_INTP,\n+    X87_VAR_BYTEP,\n+} x86_var_type;\n+\n+typedef struct x86_decode_op {\n+    enum x86_var_type type;\n+    int size;\n+\n+    int reg;\n+    addr_t val;\n+\n+    addr_t ptr;\n+} x86_decode_op;\n+\n+typedef struct x86_decode {\n+    int len;\n+    uint8_t opcode[4];\n+    uint8_t opcode_len;\n+    enum x86_decode_cmd cmd;\n+    int addressing_size;\n+    int operand_size;\n+    int lock;\n+    int rep;\n+    int op_size_override;\n+    int 
addr_size_override;\n+    int segment_override;\n+    int control_change_inst;\n+    bool fwait;\n+    bool fpop_stack;\n+    bool frev;\n+\n+    uint32_t displacement;\n+    uint8_t displacement_size;\n+    struct x86_rex rex;\n+    bool is_modrm;\n+    bool sib_present;\n+    struct x86_sib sib;\n+    struct x86_modrm modrm;\n+    struct x86_decode_op op[4];\n+    bool is_fpu;\n+    addr_t flags_mask;\n+\n+} x86_decode;\n+\n+uint64_t sign(uint64_t val, int size);\n+\n+uint32_t decode_instruction(CPUState *cpu, struct x86_decode *decode);\n+\n+addr_t get_reg_ref(CPUState *cpu, int reg, int is_extended, int size);\n+addr_t get_reg_val(CPUState *cpu, int reg, int is_extended, int size);\n+void calc_modrm_operand(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op);\n+addr_t decode_linear_addr(struct CPUState *cpu, struct x86_decode *decode, addr_t addr, x86_reg_segment seg);\n+\n+void init_decoder(CPUState* cpu);\ndiff --git a/target/i386/hvf-utils/x86_descr.c b/target/i386/hvf-utils/x86_descr.c\nnew file mode 100644\nindex 0000000000..c3b089aaa8\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_descr.c\n@@ -0,0 +1,124 @@\n+/*\n+ * Copyright (C) 2016 Veertu Inc,\n+ * Copyright (C) 2017 Google Inc,\n+ *\n+ * This program is free software; you can redistribute it and/or\n+ * modify it under the terms of the GNU General Public License as\n+ * published by the Free Software Foundation; either version 2 or\n+ * (at your option) version 3 of the License.\n+ *\n+ * This program is distributed in the hope that it will be useful,\n+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\n+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n+ * GNU General Public License for more details.\n+ *\n+ * You should have received a copy of the GNU General Public License along\n+ * with this program; if not, see <http://www.gnu.org/licenses/>.\n+ */\n+\n+#include \"qemu/osdep.h\"\n+\n+#include \"vmx.h\"\n+#include \"x86_descr.h\"\n+\n+#define VMX_SEGMENT_FIELD(seg)                      \\\n+    [REG_SEG_##seg] = {                           \\\n+        .selector = VMCS_GUEST_##seg##_SELECTOR,             \\\n+        .base = VMCS_GUEST_##seg##_BASE,                     \\\n+        .limit = VMCS_GUEST_##seg##_LIMIT,                   \\\n+        .ar_bytes = VMCS_GUEST_##seg##_ACCESS_RIGHTS,             \\\n+}\n+\n+static const struct vmx_segment_field {\n+    int selector;\n+    int base;\n+    int limit;\n+    int ar_bytes;\n+} vmx_segment_fields[] = {\n+    VMX_SEGMENT_FIELD(ES),\n+    VMX_SEGMENT_FIELD(CS),\n+    VMX_SEGMENT_FIELD(SS),\n+    VMX_SEGMENT_FIELD(DS),\n+    VMX_SEGMENT_FIELD(FS),\n+    VMX_SEGMENT_FIELD(GS),\n+    VMX_SEGMENT_FIELD(LDTR),\n+    VMX_SEGMENT_FIELD(TR),\n+};\n+\n+uint32_t vmx_read_segment_limit(CPUState *cpu, x86_reg_segment seg)\n+{\n+    return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);\n+}\n+\n+uint32_t vmx_read_segment_ar(CPUState *cpu, x86_reg_segment seg)\n+{\n+    return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);\n+}\n+\n+uint64_t vmx_read_segment_base(CPUState *cpu, x86_reg_segment seg)\n+{\n+    return rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);\n+}\n+\n+x68_segment_selector vmx_read_segment_selector(CPUState *cpu, x86_reg_segment seg)\n+{\n+    x68_segment_selector sel;\n+    sel.sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);\n+    return sel;\n+}\n+\n+void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, x86_reg_segment seg)\n+{\n+    
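/* editor's note: vmx_segment_fields[] above maps each x86_reg_segment to its\n+       four VMCS guest-state field encodings, so each accessor in this file is\n+       just a table lookup plus one VMCS read or write. */\n+    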
wvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector, selector.sel);\n+}\n+\n+void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, x86_reg_segment seg)\n+{\n+    desc->sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);\n+    desc->base = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);\n+    desc->limit = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);\n+    desc->ar = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);\n+}\n+\n+void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, x86_reg_segment seg)\n+{\n+    const struct vmx_segment_field *sf = &vmx_segment_fields[seg];\n+\n+    wvmcs(cpu->hvf_fd, sf->base, desc->base);\n+    wvmcs(cpu->hvf_fd, sf->limit, desc->limit);\n+    wvmcs(cpu->hvf_fd, sf->selector, desc->sel);\n+    wvmcs(cpu->hvf_fd, sf->ar_bytes, desc->ar);\n+}\n+\n+void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc)\n+{\n+    vmx_desc->sel = selector.sel;\n+    vmx_desc->base = x86_segment_base(desc);\n+    vmx_desc->limit = x86_segment_limit(desc);\n+\n+    vmx_desc->ar = (selector.sel ? 0 : 1) << 16 |\n+                    desc->g << 15 |\n+                    desc->db << 14 |\n+                    desc->l << 13 |\n+                    desc->avl << 12 |\n+                    desc->p << 7 |\n+                    desc->dpl << 5 |\n+                    desc->s << 4 |\n+                    desc->type;\n+}\n+\n+void vmx_segment_to_x86_descriptor(struct CPUState *cpu, struct vmx_segment *vmx_desc, struct x86_segment_descriptor *desc)\n+{\n+    x86_set_segment_limit(desc, vmx_desc->limit);\n+    x86_set_segment_base(desc, vmx_desc->base);\n+    \n+    desc->type = vmx_desc->ar & 15;\n+    desc->s = (vmx_desc->ar >> 4) & 1;\n+    desc->dpl = (vmx_desc->ar >> 5) & 3;\n+    desc->p = (vmx_desc->ar >> 7) & 1;\n+    desc->avl = (vmx_desc->ar >> 12) & 1;\n+    desc->l = (vmx_desc->ar >> 13) & 1;\n+    desc->db = (vmx_desc->ar >> 14) & 1;\n+    desc->g = (vmx_desc->ar >> 15) & 1;\n+}\n+\ndiff --git a/target/i386/hvf-utils/x86_descr.h b/target/i386/hvf-utils/x86_descr.h\nnew file mode 100644\nindex 0000000000..78fb1bc420\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_descr.h\n@@ -0,0 +1,40 @@\n+/*\n+ * Copyright (C) 2016 Veertu Inc,\n+ * Copyright (C) 2017 Google Inc,\n+ *\n+ * This program is free software; you can redistribute it and/or\n+ * modify it under the terms of the GNU General Public License as\n+ * published by the Free Software Foundation; either version 2 or\n+ * (at your option) version 3 of the License.\n+ *\n+ * This program is distributed in the hope that it will be useful,\n+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\n+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the\n+ * GNU General Public License for more details.\n+ *\n+ * You should have received a copy of the GNU General Public License along\n+ * with this program; if not, see <http://www.gnu.org/licenses/>.\n+ */\n+\n+#pragma once\n+\n+#include \"x86.h\"\n+\n+typedef struct vmx_segment {\n+    uint16_t sel;\n+    uint64_t base;\n+    uint64_t limit;\n+    uint64_t ar;\n+} vmx_segment;\n+\n+// deal with vmstate descriptors\n+void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, x86_reg_segment seg);\n+void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, x86_reg_segment seg);\n+\n+x68_segment_selector vmx_read_segment_selector(struct CPUState *cpu, x86_reg_segment seg);\n+void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, x86_reg_segment seg);\n+\n+uint64_t vmx_read_segment_base(struct CPUState *cpu, x86_reg_segment seg);\n+void vmx_write_segment_base(struct CPUState *cpu, x86_reg_segment seg, uint64_t base);\n+\n+void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc);\ndiff --git a/target/i386/hvf-utils/x86_emu.c b/target/i386/hvf-utils/x86_emu.c\nnew file mode 100644\nindex 0000000000..8b5efc76f0\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_emu.c\n@@ -0,0 +1,1466 @@\n+/*\n+ * Copyright (C) 2016 Veertu Inc,\n+ * Copyright (C) 2017 Google Inc,\n+ *\n+ * This program is free software; you can redistribute it and/or\n+ * modify it under the terms of the GNU General Public License as\n+ * published by the Free Software Foundation; either version 2 or\n+ * (at your option) version 3 of the License.\n+ *\n+ * This program is distributed in the hope that it will be useful,\n+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\n+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n+ * GNU General Public License for more details.\n+ *\n+ * You should have received a copy of the GNU General Public License along\n+ * with this program; if not, see <http://www.gnu.org/licenses/>.\n+ */\n+\n+/////////////////////////////////////////////////////////////////////////\n+//\n+//  Copyright (C) 2001-2012  The Bochs Project\n+//\n+//  This library is free software; you can redistribute it and/or\n+//  modify it under the terms of the GNU Lesser General Public\n+//  License as published by the Free Software Foundation; either\n+//  version 2 of the License, or (at your option) any later version.\n+//\n+//  This library is distributed in the hope that it will be useful,\n+//  but WITHOUT ANY WARRANTY; without even the implied warranty of\n+//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU\n+//  Lesser General Public License for more details.\n+//\n+//  You should have received a copy of the GNU Lesser General Public\n+//  License along with this library; if not, write to the Free Software\n+//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA B 02110-1301 USA\n+/////////////////////////////////////////////////////////////////////////\n+\n+#include \"qemu/osdep.h\"\n+\n+#include \"qemu-common.h\"\n+#include \"x86_decode.h\"\n+#include \"x86.h\"\n+#include \"x86_emu.h\"\n+#include \"x86_mmu.h\"\n+#include \"vmcs.h\"\n+#include \"vmx.h\"\n+\n+static void print_debug(struct CPUState *cpu);\n+void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data, int direction, int size, uint32_t count);\n+\n+#define EXEC_2OP_LOGIC_CMD(cpu, decode, cmd, FLAGS_FUNC, save_res) \\\n+{                                                       \\\n+    fetch_operands(cpu, decode, 2, true, true, false);  \\\n+    switch (decode->operand_size) {                     \\\n+    case 1:                                         \\\n+    {                                               \\\n+        uint8_t v1 = (uint8_t)decode->op[0].val;    \\\n+        uint8_t v2 = (uint8_t)decode->op[1].val;    \\\n+        uint8_t diff = v1 cmd v2;                   \\\n+        if (save_res)                               \\\n+            write_val_ext(cpu, decode->op[0].ptr, diff, 1);  \\\n+        FLAGS_FUNC##_8(diff);                       \\\n+        break;                                      \\\n+    }                                               \\\n+    case 2:                                        \\\n+    {                                               \\\n+        uint16_t v1 = (uint16_t)decode->op[0].val;  \\\n+        uint16_t v2 = (uint16_t)decode->op[1].val;  \\\n+        uint16_t diff = v1 cmd v2;                  \\\n+        if (save_res)                               \\\n+            write_val_ext(cpu, decode->op[0].ptr, diff, 2); \\\n+        FLAGS_FUNC##_16(diff);                      \\\n+        break;                                      \\\n+    }                                               \\\n+    case 4:                                        \\\n+    {                                               \\\n+        uint32_t v1 = (uint32_t)decode->op[0].val;  \\\n+        uint32_t v2 = (uint32_t)decode->op[1].val;  \\\n+        uint32_t diff = v1 cmd v2;                  \\\n+        if (save_res)                               \\\n+            write_val_ext(cpu, decode->op[0].ptr, diff, 4); \\\n+        FLAGS_FUNC##_32(diff);                      \\\n+        break;                                      \\\n+    }                                               \\\n+    default:                                        \\\n+        VM_PANIC(\"bad size\\n\");                    \\\n+    }                                                   \\\n+}                                                       \\\n+\n+\n+#define EXEC_2OP_ARITH_CMD(cpu, decode, cmd, FLAGS_FUNC, save_res) \\\n+{                                                       \\\n+    fetch_operands(cpu, decode, 2, true, true, false);  \\\n+    switch (decode->operand_size) {                     \\\n+    case 1:                                         \\\n+    {                                               \\\n+        uint8_t v1 = (uint8_t)decode->op[0].val;    \\\n+        uint8_t v2 = (uint8_t)decode->op[1].val;    \\\n+        uint8_t diff = v1 cmd v2;                   \\\n+        if (save_res)               
                \\\n+            write_val_ext(cpu, decode->op[0].ptr, diff, 1);  \\\n+        FLAGS_FUNC##_8(v1, v2, diff);               \\\n+        break;                                      \\\n+    }                                               \\\n+    case 2:                                        \\\n+    {                                               \\\n+        uint16_t v1 = (uint16_t)decode->op[0].val;  \\\n+        uint16_t v2 = (uint16_t)decode->op[1].val;  \\\n+        uint16_t diff = v1 cmd v2;                  \\\n+        if (save_res)                               \\\n+            write_val_ext(cpu, decode->op[0].ptr, diff, 2); \\\n+        FLAGS_FUNC##_16(v1, v2, diff);              \\\n+        break;                                      \\\n+    }                                               \\\n+    case 4:                                        \\\n+    {                                               \\\n+        uint32_t v1 = (uint32_t)decode->op[0].val;  \\\n+        uint32_t v2 = (uint32_t)decode->op[1].val;  \\\n+        uint32_t diff = v1 cmd v2;                  \\\n+        if (save_res)                               \\\n+            write_val_ext(cpu, decode->op[0].ptr, diff, 4); \\\n+        FLAGS_FUNC##_32(v1, v2, diff);              \\\n+        break;                                      \\\n+    }                                               \\\n+    default:                                        \\\n+        VM_PANIC(\"bad size\\n\");                    \\\n+    }                                                   \\\n+}\n+\n+addr_t read_reg(struct CPUState* cpu, int reg, int size)\n+{\n+    switch (size) {\n+        case 1:\n+            return cpu->hvf_x86->regs[reg].lx;\n+        case 2:\n+            return cpu->hvf_x86->regs[reg].rx;\n+        case 4:\n+            return cpu->hvf_x86->regs[reg].erx;\n+        case 8:\n+            return cpu->hvf_x86->regs[reg].rrx;\n+        default:\n+            VM_PANIC_ON(\"read_reg size\");\n+    }\n+    return 0;\n+}\n+\n+void write_reg(struct CPUState* cpu, int reg, addr_t val, int size)\n+{\n+    switch (size) {\n+        case 1:\n+            cpu->hvf_x86->regs[reg].lx = val;\n+            break;\n+        case 2:\n+            cpu->hvf_x86->regs[reg].rx = val;\n+            break;\n+        case 4:\n+            cpu->hvf_x86->regs[reg].rrx = (uint32_t)val;\n+            break;\n+        case 8:\n+            cpu->hvf_x86->regs[reg].rrx = val;\n+            break;\n+        default:\n+            VM_PANIC_ON(\"write_reg size\");\n+    }\n+}\n+\n+addr_t read_val_from_reg(addr_t reg_ptr, int size)\n+{\n+    addr_t val;\n+    \n+    switch (size) {\n+        case 1:\n+            val = *(uint8_t*)reg_ptr;\n+            break;\n+        case 2:\n+            val = *(uint16_t*)reg_ptr;\n+            break;\n+        case 4:\n+            val = *(uint32_t*)reg_ptr;\n+            break;\n+        case 8:\n+            val = *(uint64_t*)reg_ptr;\n+            break;\n+        default:\n+            VM_PANIC_ON_EX(1, \"read_val: Unknown size %d\\n\", size);\n+            break;\n+    }\n+    return val;\n+}\n+\n+void write_val_to_reg(addr_t reg_ptr, addr_t val, int size)\n+{\n+    switch (size) {\n+        case 1:\n+            *(uint8_t*)reg_ptr = val;\n+            break;\n+        case 2:\n+            *(uint16_t*)reg_ptr = val;\n+            break;\n+        case 4:\n+            *(uint64_t*)reg_ptr = (uint32_t)val;\n+            break;\n+        case 8:\n+            *(uint64_t*)reg_ptr = val;\n+      
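      /* editor's note: the case-4 store above intentionally goes through a\n+             * uint64_t pointer - 32-bit register writes zero-extend to 64 bits\n+             * on x86-64, matching write_reg(). */\n+      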
      break;\n+        default:\n+            VM_PANIC(\"write_val: Unknown size\\n\");\n+            break;\n+    }\n+}\n+\n+static bool is_host_reg(struct CPUState* cpu, addr_t ptr) {\n+    return (ptr > (addr_t)cpu && ptr < (addr_t)cpu + sizeof(struct CPUState)) ||\n+           (ptr > (addr_t)cpu->hvf_x86 && ptr < (addr_t)cpu->hvf_x86 + sizeof(struct hvf_x86_state));\n+}\n+\n+void write_val_ext(struct CPUState* cpu, addr_t ptr, addr_t val, int size)\n+{\n+    if (is_host_reg(cpu, ptr)) {\n+        write_val_to_reg(ptr, val, size);\n+        return;\n+    }\n+    vmx_write_mem(cpu, ptr, &val, size);\n+}\n+\n+uint8_t *read_mmio(struct CPUState* cpu, addr_t ptr, int bytes)\n+{\n+    vmx_read_mem(cpu, cpu->hvf_x86->mmio_buf, ptr, bytes);\n+    return cpu->hvf_x86->mmio_buf;\n+}\n+\n+addr_t read_val_ext(struct CPUState* cpu, addr_t ptr, int size)\n+{\n+    addr_t val;\n+    uint8_t *mmio_ptr;\n+    \n+    if (is_host_reg(cpu, ptr)) {\n+        return read_val_from_reg(ptr, size);\n+    }\n+    \n+    mmio_ptr = read_mmio(cpu, ptr, size);\n+    switch (size) {\n+        case 1:\n+            val = *(uint8_t*)mmio_ptr;\n+            break;\n+        case 2:\n+            val = *(uint16_t*)mmio_ptr;\n+            break;\n+        case 4:\n+            val = *(uint32_t*)mmio_ptr;\n+            break;\n+        case 8:\n+            val = *(uint64_t*)mmio_ptr;\n+            break;\n+        default:\n+            VM_PANIC(\"bad size\\n\");\n+            break;\n+    }\n+    return val;\n+}\n+\n+static void fetch_operands(struct CPUState *cpu, struct x86_decode *decode, int n, bool val_op0, bool val_op1, bool val_op2)\n+{\n+    int i;\n+    bool calc_val[3] = {val_op0, val_op1, val_op2};\n+\n+    for (i = 0; i < n; i++) {\n+        switch (decode->op[i].type) {\n+            case X86_VAR_IMMEDIATE:\n+                break;\n+            case X86_VAR_REG:\n+                VM_PANIC_ON(!decode->op[i].ptr);\n+                if (calc_val[i])\n+                    decode->op[i].val = read_val_from_reg(decode->op[i].ptr, decode->operand_size);\n+                break;\n+            case X86_VAR_RM:\n+                calc_modrm_operand(cpu, decode, &decode->op[i]);\n+                if (calc_val[i])\n+                    decode->op[i].val = read_val_ext(cpu, decode->op[i].ptr, decode->operand_size);\n+                break;\n+            case X86_VAR_OFFSET:\n+                decode->op[i].ptr = decode_linear_addr(cpu, decode, decode->op[i].ptr, REG_SEG_DS);\n+                if (calc_val[i])\n+                    decode->op[i].val = read_val_ext(cpu, decode->op[i].ptr, decode->operand_size);\n+                break;\n+            default:\n+                break;\n+        }\n+    }\n+}\n+\n+static void exec_mov(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    fetch_operands(cpu, decode, 2, false, true, false);\n+    write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val, decode->operand_size);\n+\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_add(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    EXEC_2OP_ARITH_CMD(cpu, decode, +, SET_FLAGS_OSZAPC_ADD, true);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_or(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    EXEC_2OP_LOGIC_CMD(cpu, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_adc(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    EXEC_2OP_ARITH_CMD(cpu, decode, +get_CF(cpu)+, SET_FLAGS_OSZAPC_ADD, true);\n+    RIP(cpu) += 
decode->len;\n+}\n+\n+static void exec_sbb(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    EXEC_2OP_ARITH_CMD(cpu, decode, -get_CF(cpu)-, SET_FLAGS_OSZAPC_SUB, true);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_and(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    EXEC_2OP_LOGIC_CMD(cpu, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_sub(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    EXEC_2OP_ARITH_CMD(cpu, decode, -, SET_FLAGS_OSZAPC_SUB, true);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_xor(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    EXEC_2OP_LOGIC_CMD(cpu, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_neg(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    //EXEC_2OP_ARITH_CMD(cpu, decode, -, SET_FLAGS_OSZAPC_SUB, false);\n+    int32_t val;\n+    fetch_operands(cpu, decode, 2, true, true, false);\n+\n+    val = 0 - sign(decode->op[1].val, decode->operand_size);\n+    write_val_ext(cpu, decode->op[1].ptr, val, decode->operand_size);\n+\n+    if (4 == decode->operand_size) {\n+        SET_FLAGS_OSZAPC_SUB_32(0, 0 - val, val);\n+    }\n+    else if (2 == decode->operand_size) {\n+        SET_FLAGS_OSZAPC_SUB_16(0, 0 - val, val);\n+    }\n+    else if (1 == decode->operand_size) {\n+        SET_FLAGS_OSZAPC_SUB_8(0, 0 - val, val);\n+    } else {\n+        VM_PANIC(\"bad op size\\n\");\n+    }\n+\n+    //lflags_to_rflags(cpu);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_cmp(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    EXEC_2OP_ARITH_CMD(cpu, decode, -, SET_FLAGS_OSZAPC_SUB, false);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_inc(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->op[1].type = X86_VAR_IMMEDIATE;\n+    decode->op[1].val = 0;\n+\n+    EXEC_2OP_ARITH_CMD(cpu, decode, +1+, SET_FLAGS_OSZAP_ADD, true);\n+\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_dec(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->op[1].type = X86_VAR_IMMEDIATE;\n+    decode->op[1].val = 0;\n+\n+    EXEC_2OP_ARITH_CMD(cpu, decode, -1-, SET_FLAGS_OSZAP_SUB, true);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_tst(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    EXEC_2OP_LOGIC_CMD(cpu, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_not(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    fetch_operands(cpu, decode, 1, true, false, false);\n+\n+    write_val_ext(cpu, decode->op[0].ptr, ~decode->op[0].val, decode->operand_size);\n+    RIP(cpu) += decode->len;\n+}\n+\n+void exec_movzx(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    int src_op_size;\n+    int op_size = decode->operand_size;\n+\n+    fetch_operands(cpu, decode, 1, false, false, false);\n+\n+    if (0xb6 == decode->opcode[1])\n+        src_op_size = 1;\n+    else\n+        src_op_size = 2;\n+    decode->operand_size = src_op_size;\n+    calc_modrm_operand(cpu, decode, &decode->op[1]);\n+    decode->op[1].val = read_val_ext(cpu, decode->op[1].ptr, src_op_size);\n+    write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val, op_size);\n+\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_out(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    switch (decode->opcode[0]) {\n+        case 0xe6:\n+            hvf_handle_io(cpu, decode->op[0].val, &AL(cpu), 1, 1, 1);\n+            break;\n+   
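     /* editor's note: 0xe6/0xe7 take the port from imm8, 0xee/0xef from DX;\n+         * direction 1 means out, and AL vs eAX picks the transfer size. */\n+   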
     case 0xe7:\n+            hvf_handle_io(cpu, decode->op[0].val, &RAX(cpu), 1, decode->operand_size, 1);\n+            break;\n+        case 0xee:\n+            hvf_handle_io(cpu, DX(cpu), &AL(cpu), 1, 1, 1);\n+            break;\n+        case 0xef:\n+            hvf_handle_io(cpu, DX(cpu), &RAX(cpu), 1, decode->operand_size, 1);\n+            break;\n+        default:\n+            VM_PANIC(\"Bad out opcode\\n\");\n+            break;\n+    }\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_in(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    addr_t val = 0;\n+    switch (decode->opcode[0]) {\n+        case 0xe4:\n+            hvf_handle_io(cpu, decode->op[0].val, &AL(cpu), 0, 1, 1);\n+            break;\n+        case 0xe5:\n+            hvf_handle_io(cpu, decode->op[0].val, &val, 0, decode->operand_size, 1);\n+            if (decode->operand_size == 2)\n+                AX(cpu) = val;\n+            else\n+                RAX(cpu) = (uint32_t)val;\n+            break;\n+        case 0xec:\n+            hvf_handle_io(cpu, DX(cpu), &AL(cpu), 0, 1, 1);\n+            break;\n+        case 0xed:\n+            hvf_handle_io(cpu, DX(cpu), &val, 0, decode->operand_size, 1);\n+            if (decode->operand_size == 2)\n+                AX(cpu) = val;\n+            else\n+                RAX(cpu) = (uint32_t)val;\n+\n+            break;\n+        default:\n+            VM_PANIC(\"Bad in opcode\\n\");\n+            break;\n+    }\n+\n+    RIP(cpu) += decode->len;\n+}\n+\n+static inline void string_increment_reg(struct CPUState * cpu, int reg, struct x86_decode *decode)\n+{\n+    addr_t val = read_reg(cpu, reg, decode->addressing_size);\n+    if (cpu->hvf_x86->rflags.df)\n+        val -= decode->operand_size;\n+    else\n+        val += decode->operand_size;\n+    write_reg(cpu, reg, val, decode->addressing_size);\n+}\n+\n+static inline void string_rep(struct CPUState * cpu, struct x86_decode *decode, void (*func)(struct CPUState *cpu, struct x86_decode *ins), int rep)\n+{\n+    addr_t rcx = read_reg(cpu, REG_RCX, decode->addressing_size);\n+    while (rcx--) {\n+        func(cpu, decode);\n+        write_reg(cpu, REG_RCX, rcx, decode->addressing_size);\n+        if ((PREFIX_REP == rep) && !get_ZF(cpu))\n+            break;\n+        if ((PREFIX_REPN == rep) && get_ZF(cpu))\n+            break;\n+    }\n+}\n+\n+static void exec_ins_single(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    addr_t addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size, REG_SEG_ES);\n+\n+    hvf_handle_io(cpu, DX(cpu), cpu->hvf_x86->mmio_buf, 0, decode->operand_size, 1);\n+    vmx_write_mem(cpu, addr, cpu->hvf_x86->mmio_buf, decode->operand_size);\n+\n+    string_increment_reg(cpu, REG_RDI, decode);\n+}\n+\n+static void exec_ins(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    if (decode->rep)\n+        string_rep(cpu, decode, exec_ins_single, 0);\n+    else\n+        exec_ins_single(cpu, decode);\n+\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_outs_single(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    addr_t addr = decode_linear_addr(cpu, decode, RSI(cpu), REG_SEG_DS);\n+\n+    vmx_read_mem(cpu, cpu->hvf_x86->mmio_buf, addr, decode->operand_size);\n+    hvf_handle_io(cpu, DX(cpu), cpu->hvf_x86->mmio_buf, 1, decode->operand_size, 1);\n+\n+    string_increment_reg(cpu, REG_RSI, decode);\n+}\n+\n+static void exec_outs(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    if (decode->rep)\n+        string_rep(cpu, decode, exec_outs_single, 0);\n+ 
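   /* editor's note: string_rep() repeats the one-shot helper RCX times; the\n+       final 0 argument disables the ZF early-exit that only cmps/scas use. */\n+ 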
   else\n+        exec_outs_single(cpu, decode);\n+    \n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_movs_single(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    addr_t src_addr;\n+    addr_t dst_addr;\n+    addr_t val;\n+    \n+    src_addr = decode_linear_addr(cpu, decode, RSI(cpu), REG_SEG_DS);\n+    dst_addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size, REG_SEG_ES);\n+    \n+    val = read_val_ext(cpu, src_addr, decode->operand_size);\n+    write_val_ext(cpu, dst_addr, val, decode->operand_size);\n+\n+    string_increment_reg(cpu, REG_RSI, decode);\n+    string_increment_reg(cpu, REG_RDI, decode);\n+}\n+\n+static void exec_movs(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    if (decode->rep) {\n+        string_rep(cpu, decode, exec_movs_single, 0);\n+    }\n+    else\n+        exec_movs_single(cpu, decode);\n+\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_cmps_single(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    addr_t src_addr;\n+    addr_t dst_addr;\n+\n+    src_addr = decode_linear_addr(cpu, decode, RSI(cpu), REG_SEG_DS);\n+    dst_addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size, REG_SEG_ES);\n+\n+    decode->op[0].type = X86_VAR_IMMEDIATE;\n+    decode->op[0].val = read_val_ext(cpu, src_addr, decode->operand_size);\n+    decode->op[1].type = X86_VAR_IMMEDIATE;\n+    decode->op[1].val = read_val_ext(cpu, dst_addr, decode->operand_size);\n+\n+    EXEC_2OP_ARITH_CMD(cpu, decode, -, SET_FLAGS_OSZAPC_SUB, false);\n+\n+    string_increment_reg(cpu, REG_RSI, decode);\n+    string_increment_reg(cpu, REG_RDI, decode);\n+}\n+\n+static void exec_cmps(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    if (decode->rep) {\n+        string_rep(cpu, decode, exec_cmps_single, decode->rep);\n+    }\n+    else\n+        exec_cmps_single(cpu, decode);\n+    RIP(cpu) += decode->len;\n+}\n+\n+\n+static void exec_stos_single(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    addr_t addr;\n+    addr_t val;\n+\n+    addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size, REG_SEG_ES);\n+    val = read_reg(cpu, REG_RAX, decode->operand_size);\n+    vmx_write_mem(cpu, addr, &val, decode->operand_size);\n+\n+    string_increment_reg(cpu, REG_RDI, decode);\n+}\n+\n+\n+static void exec_stos(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    if (decode->rep) {\n+        string_rep(cpu, decode, exec_stos_single, 0);\n+    }\n+    else\n+        exec_stos_single(cpu, decode);\n+\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_scas_single(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    addr_t addr;\n+    \n+    addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size, REG_SEG_ES);\n+    decode->op[1].type = X86_VAR_IMMEDIATE;\n+    vmx_read_mem(cpu, &decode->op[1].val, addr, decode->operand_size);\n+\n+    EXEC_2OP_ARITH_CMD(cpu, decode, -, SET_FLAGS_OSZAPC_SUB, false);\n+    string_increment_reg(cpu, REG_RDI, decode);\n+}\n+\n+static void exec_scas(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    decode->op[0].type = X86_VAR_REG;\n+    decode->op[0].reg = REG_RAX;\n+    if (decode->rep) {\n+        string_rep(cpu, decode, exec_scas_single, decode->rep);\n+    }\n+    else\n+        exec_scas_single(cpu, decode);\n+\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_lods_single(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    addr_t addr;\n+    addr_t val = 0;\n+    \n+    addr = decode_linear_addr(cpu, decode, RSI(cpu), REG_SEG_DS);\n+    
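/* editor's note: LODS reads from DS:rSI (honouring any segment-override\n+       prefix via decode_linear_addr) into the rAX-sized register, then steps\n+       rSI by the operand size - backwards when rflags.df is set. */\n+    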
vmx_read_mem(cpu, &val, addr,  decode->operand_size);\n+    write_reg(cpu, REG_RAX, val, decode->operand_size);\n+\n+    string_increment_reg(cpu, REG_RSI, decode);\n+}\n+\n+static void exec_lods(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    if (decode->rep) {\n+        string_rep(cpu, decode, exec_lods_single, 0);\n+    }\n+    else\n+        exec_lods_single(cpu, decode);\n+\n+    RIP(cpu) += decode->len;\n+}\n+\n+#define MSR_IA32_UCODE_REV \t\t0x00000017\n+\n+void simulate_rdmsr(struct CPUState *cpu)\n+{\n+    X86CPU *x86_cpu = X86_CPU(cpu);\n+    CPUX86State *env = &x86_cpu->env;\n+    uint32_t msr = ECX(cpu);\n+    uint64_t val = 0;\n+\n+    switch (msr) {\n+        case MSR_IA32_TSC:\n+            val = rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET);\n+            break;\n+        case MSR_IA32_APICBASE:\n+            val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);\n+            break;\n+        case MSR_IA32_UCODE_REV:\n+            val = (0x100000000ULL << 32) | 0x100000000ULL;\n+            break;\n+        case MSR_EFER:\n+            val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);\n+            break;\n+        case MSR_FSBASE:\n+            val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);\n+            break;\n+        case MSR_GSBASE:\n+            val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);\n+            break;\n+        case MSR_KERNELGSBASE:\n+            val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);\n+            break;\n+        case MSR_STAR:\n+            abort();\n+            break;\n+        case MSR_LSTAR:\n+            abort();\n+            break;\n+        case MSR_CSTAR:\n+            abort();\n+            break;\n+        case MSR_IA32_MISC_ENABLE:\n+            val = env->msr_ia32_misc_enable;\n+            break;\n+        case MSR_MTRRphysBase(0):\n+        case MSR_MTRRphysBase(1):\n+        case MSR_MTRRphysBase(2):\n+        case MSR_MTRRphysBase(3):\n+        case MSR_MTRRphysBase(4):\n+        case MSR_MTRRphysBase(5):\n+        case MSR_MTRRphysBase(6):\n+        case MSR_MTRRphysBase(7):\n+            val = env->mtrr_var[(ECX(cpu) - MSR_MTRRphysBase(0)) / 2].base;\n+            break;\n+        case MSR_MTRRphysMask(0):\n+        case MSR_MTRRphysMask(1):\n+        case MSR_MTRRphysMask(2):\n+        case MSR_MTRRphysMask(3):\n+        case MSR_MTRRphysMask(4):\n+        case MSR_MTRRphysMask(5):\n+        case MSR_MTRRphysMask(6):\n+        case MSR_MTRRphysMask(7):\n+            val = env->mtrr_var[(ECX(cpu) - MSR_MTRRphysMask(0)) / 2].mask;\n+            break;\n+        case MSR_MTRRfix64K_00000:\n+            val = env->mtrr_fixed[0];\n+            break;\n+        case MSR_MTRRfix16K_80000:\n+        case MSR_MTRRfix16K_A0000:\n+            val = env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix16K_80000 + 1];\n+            break;\n+        case MSR_MTRRfix4K_C0000:\n+        case MSR_MTRRfix4K_C8000:\n+        case MSR_MTRRfix4K_D0000:\n+        case MSR_MTRRfix4K_D8000:\n+        case MSR_MTRRfix4K_E0000:\n+        case MSR_MTRRfix4K_E8000:\n+        case MSR_MTRRfix4K_F0000:\n+        case MSR_MTRRfix4K_F8000:\n+            val = env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix4K_C0000 + 3];\n+            break;\n+        case MSR_MTRRdefType:\n+            val = env->mtrr_deftype;\n+            break;\n+        default:\n+            // fprintf(stderr, \"%s: unknown msr 0x%x\\n\", __func__, msr);\n+            val = 0;\n+            break;\n+    }\n+\n+    RAX(cpu) = (uint32_t)val;\n+    RDX(cpu) = (uint32_t)(val >> 32);\n+}\n+\n+static void 
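/*\n+ * editor's sketch (hypothetical helper, not part of this patch, illustrative\n+ * only): RDMSR hands a 64-bit value to the guest split across EDX:EAX, which\n+ * is exactly the split simulate_rdmsr() performs above - e.g. a value of\n+ * 0x0000000200000140 yields eax = 0x00000140 and edx = 0x00000002.\n+ */\n+print_msr_split(uint64_t val)\n+{\n+    fprintf(stderr, \"eax=%08x edx=%08x\\n\",\n+            (uint32_t)val, (uint32_t)(val >> 32));\n+}\n+\n+static void 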
exec_rdmsr(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    simulate_rdmsr(cpu);\n+    RIP(cpu) += decode->len;\n+}\n+\n+void simulate_wrmsr(struct CPUState *cpu)\n+{\n+    X86CPU *x86_cpu = X86_CPU(cpu);\n+    CPUX86State *env = &x86_cpu->env;\n+    uint32_t msr = ECX(cpu);\n+    uint64_t data = ((uint64_t)EDX(cpu) << 32) | EAX(cpu);\n+\n+    switch (msr) {\n+        case MSR_IA32_TSC:\n+            // if (!osx_is_sierra())\n+            //     wvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET, data - rdtscp());\n+            //hv_vm_sync_tsc(data);\n+            break;\n+        case MSR_IA32_APICBASE:\n+            cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);\n+            break;\n+        case MSR_FSBASE:\n+            wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);\n+            break;\n+        case MSR_GSBASE:\n+            wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);\n+            break;\n+        case MSR_KERNELGSBASE:\n+            wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);\n+            break;\n+        case MSR_STAR:\n+            abort();\n+            break;\n+        case MSR_LSTAR:\n+            abort();\n+            break;\n+        case MSR_CSTAR:\n+            abort();\n+            break;\n+        case MSR_EFER:\n+            cpu->hvf_x86->efer.efer = data;\n+            //printf(\"new efer %llx\\n\", EFER(cpu));\n+            wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);\n+            if (data & EFER_NXE)\n+                hv_vcpu_invalidate_tlb(cpu->hvf_fd);\n+            break;\n+        case MSR_MTRRphysBase(0):\n+        case MSR_MTRRphysBase(1):\n+        case MSR_MTRRphysBase(2):\n+        case MSR_MTRRphysBase(3):\n+        case MSR_MTRRphysBase(4):\n+        case MSR_MTRRphysBase(5):\n+        case MSR_MTRRphysBase(6):\n+        case MSR_MTRRphysBase(7):\n+            env->mtrr_var[(ECX(cpu) - MSR_MTRRphysBase(0)) / 2].base = data;\n+            break;\n+        case MSR_MTRRphysMask(0):\n+        case MSR_MTRRphysMask(1):\n+        case MSR_MTRRphysMask(2):\n+        case MSR_MTRRphysMask(3):\n+        case MSR_MTRRphysMask(4):\n+        case MSR_MTRRphysMask(5):\n+        case MSR_MTRRphysMask(6):\n+        case MSR_MTRRphysMask(7):\n+            env->mtrr_var[(ECX(cpu) - MSR_MTRRphysMask(0)) / 2].mask = data;\n+            break;\n+        case MSR_MTRRfix64K_00000:\n+            env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix64K_00000] = data;\n+            break;\n+        case MSR_MTRRfix16K_80000:\n+        case MSR_MTRRfix16K_A0000:\n+            env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix16K_80000 + 1] = data;\n+            break;\n+        case MSR_MTRRfix4K_C0000:\n+        case MSR_MTRRfix4K_C8000:\n+        case MSR_MTRRfix4K_D0000:\n+        case MSR_MTRRfix4K_D8000:\n+        case MSR_MTRRfix4K_E0000:\n+        case MSR_MTRRfix4K_E8000:\n+        case MSR_MTRRfix4K_F0000:\n+        case MSR_MTRRfix4K_F8000:\n+            env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix4K_C0000 + 3] = data;\n+            break;\n+        case MSR_MTRRdefType:\n+            env->mtrr_deftype = data;\n+            break;\n+        default:\n+            break;\n+    }\n+\n+    /* Related to support known hypervisor interface */\n+    // if (g_hypervisor_iface)\n+    //     g_hypervisor_iface->wrmsr_handler(cpu, msr, data);\n+\n+    //printf(\"write msr %llx\\n\", RCX(cpu));\n+}\n+\n+static void exec_wrmsr(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    simulate_wrmsr(cpu);\n+    RIP(cpu) += decode->len;\n+}\n+\n+/*\n+ * flag:\n+ * 0 - bt, 1 - btc, 2 - bts, 3 - btr\n+ 
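* (CF always receives the bit selected by op[1], masked mod 16 or 32; btc,\n+ * bts and btr then complement, set or clear that bit before writing back.)\n+ 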
*/\n+static void do_bt(struct CPUState *cpu, struct x86_decode *decode, int flag)\n+{\n+    int32_t displacement;\n+    uint8_t index;\n+    bool cf;\n+    int mask = (4 == decode->operand_size) ? 0x1f : 0xf;\n+\n+    VM_PANIC_ON(decode->rex.rex);\n+\n+    fetch_operands(cpu, decode, 2, false, true, false);\n+    index = decode->op[1].val & mask;\n+\n+    if (decode->op[0].type != X86_VAR_REG) {\n+        if (4 == decode->operand_size) {\n+            displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;\n+            decode->op[0].ptr += 4 * displacement;\n+        } else if (2 == decode->operand_size) {\n+            displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;\n+            decode->op[0].ptr += 2 * displacement;\n+        } else {\n+            VM_PANIC(\"bt 64bit\\n\");\n+        }\n+    }\n+    decode->op[0].val = read_val_ext(cpu, decode->op[0].ptr, decode->operand_size);\n+    cf = (decode->op[0].val >> index) & 0x01;\n+\n+    switch (flag) {\n+        case 0:\n+            set_CF(cpu, cf);\n+            return;\n+        case 1:\n+            decode->op[0].val ^= (1u << index);\n+            break;\n+        case 2:\n+            decode->op[0].val |= (1u << index);\n+            break;\n+        case 3:\n+            decode->op[0].val &= ~(1u << index);\n+            break;\n+    }\n+    write_val_ext(cpu, decode->op[0].ptr, decode->op[0].val, decode->operand_size);\n+    set_CF(cpu, cf);\n+}\n+\n+static void exec_bt(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    do_bt(cpu, decode, 0);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_btc(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    do_bt(cpu, decode, 1);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_btr(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    do_bt(cpu, decode, 3);\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_bts(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    do_bt(cpu, decode, 2);\n+    RIP(cpu) += decode->len;\n+}\n+\n+void exec_shl(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    uint8_t count;\n+    int of = 0, cf = 0;\n+\n+    fetch_operands(cpu, decode, 2, true, true, false);\n+\n+    count = decode->op[1].val;\n+    count &= 0x1f;      // count is masked to 5 bits\n+    if (!count)\n+        goto exit;\n+\n+    switch (decode->operand_size) {\n+        case 1:\n+        {\n+            uint8_t res = 0;\n+            if (count <= 8) {\n+                res = (decode->op[0].val << count);\n+                cf = (decode->op[0].val >> (8 - count)) & 0x1;\n+                of = cf ^ (res >> 7);\n+            }\n+\n+            write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+            SET_FLAGS_OSZAPC_LOGIC_8(res);\n+            SET_FLAGS_OxxxxC(cpu, of, cf);\n+            break;\n+        }\n+        case 2:\n+        {\n+            uint16_t res = 0;\n+\n+            /* from bochs */\n+            if (count <= 16) {\n+                res = (decode->op[0].val << count);\n+                cf = (decode->op[0].val >> (16 - count)) & 0x1;\n+                of = cf ^ (res >> 15); // of = cf ^ result15\n+            }\n+\n+            write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+            SET_FLAGS_OSZAPC_LOGIC_16(res);\n+            SET_FLAGS_OxxxxC(cpu, of, cf);\n+            break;\n+        }\n+        case 4:\n+        {\n+            uint32_t res = decode->op[0].val << count;\n+            \n+            write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+            
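/* count is 1..31 here (masked and tested above), so the CF/OF shifts below stay in range */\n+            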
SET_FLAGS_OSZAPC_LOGIC_32(res);\n+            cf = (decode->op[0].val >> (32 - count)) & 0x1;\n+            of = cf ^ (res >> 31); // of = cf ^ result31\n+            SET_FLAGS_OxxxxC(cpu, of, cf);\n+            break;\n+        }\n+        default:\n+            abort();\n+    }\n+\n+exit:\n+    //lflags_to_rflags(cpu);\n+    RIP(cpu) += decode->len;\n+}\n+\n+void exec_movsx(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    int src_op_size;\n+    int op_size = decode->operand_size;\n+\n+    fetch_operands(cpu, decode, 2, false, false, false);\n+\n+    if (0xbe == decode->opcode[1])\n+        src_op_size = 1;\n+    else\n+        src_op_size = 2;\n+\n+    decode->operand_size = src_op_size;\n+    calc_modrm_operand(cpu, decode, &decode->op[1]);\n+    decode->op[1].val = sign(read_val_ext(cpu, decode->op[1].ptr, src_op_size), src_op_size);\n+\n+    write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val, op_size);\n+\n+    RIP(cpu) += decode->len;\n+}\n+\n+void exec_ror(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    uint8_t count;\n+\n+    fetch_operands(cpu, decode, 2, true, true, false);\n+    count = decode->op[1].val;\n+\n+    switch (decode->operand_size) {\n+        case 1:\n+        {\n+            uint32_t bit6, bit7;\n+            uint8_t res;\n+\n+            if ((count & 0x07) == 0) {\n+                if (count & 0x18) {\n+                    bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;\n+                    bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;\n+                    SET_FLAGS_OxxxxC(cpu, bit6 ^ bit7, bit7);\n+                 }\n+            } else {\n+                count &= 0x7; /* use only bottom 3 bits */\n+                res = ((uint8_t)decode->op[0].val >> count) | ((uint8_t)decode->op[0].val << (8 - count));\n+                write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+                bit6 = (res >> 6) & 1;\n+                bit7 = (res >> 7) & 1;\n+                /* set eflags: ROR count affects the following flags: C, O */\n+                SET_FLAGS_OxxxxC(cpu, bit6 ^ bit7, bit7);\n+            }\n+            break;\n+        }\n+        case 2:\n+        {\n+            uint32_t bit14, bit15;\n+            uint16_t res;\n+\n+            if ((count & 0x0f) == 0) {\n+                if (count & 0x10) {\n+                    bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;\n+                    bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;\n+                    // of = result14 ^ result15\n+                    SET_FLAGS_OxxxxC(cpu, bit14 ^ bit15, bit15);\n+                }\n+            } else {\n+                count &= 0x0f;  // use only 4 LSB's\n+                res = ((uint16_t)decode->op[0].val >> count) | ((uint16_t)decode->op[0].val << (16 - count));\n+                write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+\n+                bit14 = (res >> 14) & 1;\n+                bit15 = (res >> 15) & 1;\n+                // of = result14 ^ result15\n+                SET_FLAGS_OxxxxC(cpu, bit14 ^ bit15, bit15);\n+            }\n+            break;\n+        }\n+        case 4:\n+        {\n+            uint32_t bit31, bit30;\n+            uint32_t res;\n+\n+            count &= 0x1f;\n+            if (count) {\n+                res = ((uint32_t)decode->op[0].val >> count) | ((uint32_t)decode->op[0].val << (32 - count));\n+                write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+\n+                bit31 = (res >> 31) & 1;\n+                bit30 = (res >> 30) & 1;\n+                // of = result30 ^ result31\n+     
           SET_FLAGS_OxxxxC(cpu, bit30 ^ bit31, bit31);\n+            }\n+            break;\n+        }\n+    }\n+    RIP(cpu) += decode->len;\n+}\n+\n+void exec_rol(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    uint8_t count;\n+\n+    fetch_operands(cpu, decode, 2, true, true, false);\n+    count = decode->op[1].val;\n+\n+    switch (decode->operand_size) {\n+        case 1:\n+        {\n+            uint32_t bit0, bit7;\n+            uint8_t res;\n+\n+            if ((count & 0x07) == 0) {\n+                if (count & 0x18) {\n+                    bit0 = ((uint8_t)decode->op[0].val & 1);\n+                    bit7 = ((uint8_t)decode->op[0].val >> 7);\n+                    SET_FLAGS_OxxxxC(cpu, bit0 ^ bit7, bit0);\n+                }\n+            }  else {\n+                count &= 0x7; // use only lowest 3 bits\n+                res = ((uint8_t)decode->op[0].val << count) | ((uint8_t)decode->op[0].val >> (8 - count));\n+\n+                write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+                /* set eflags:\n+                 * ROL count affects the following flags: C, O\n+                 */\n+                bit0 = (res &  1);\n+                bit7 = (res >> 7);\n+                SET_FLAGS_OxxxxC(cpu, bit0 ^ bit7, bit0);\n+            }\n+            break;\n+        }\n+        case 2:\n+        {\n+            uint32_t bit0, bit15;\n+            uint16_t res;\n+\n+            if ((count & 0x0f) == 0) {\n+                if (count & 0x10) {\n+                    bit0  = ((uint16_t)decode->op[0].val & 0x1);\n+                    bit15 = ((uint16_t)decode->op[0].val >> 15);\n+                    // of = cf ^ result15\n+                    SET_FLAGS_OxxxxC(cpu, bit0 ^ bit15, bit0);\n+                }\n+            } else {\n+                count &= 0x0f; // only use bottom 4 bits\n+                res = ((uint16_t)decode->op[0].val << count) | ((uint16_t)decode->op[0].val >> (16 - count));\n+\n+                write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+                bit0  = (res & 0x1);\n+                bit15 = (res >> 15);\n+                // of = cf ^ result15\n+                SET_FLAGS_OxxxxC(cpu, bit0 ^ bit15, bit0);\n+            }\n+            break;\n+        }\n+        case 4:\n+        {\n+            uint32_t bit0, bit31;\n+            uint32_t res;\n+\n+            count &= 0x1f;\n+            if (count) {\n+                res = ((uint32_t)decode->op[0].val << count) | ((uint32_t)decode->op[0].val >> (32 - count));\n+\n+                write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+                bit0  = (res & 0x1);\n+                bit31 = (res >> 31);\n+                // of = cf ^ result31\n+                SET_FLAGS_OxxxxC(cpu, bit0 ^ bit31, bit0);\n+            }\n+            break;\n+        }\n+    }\n+    RIP(cpu) += decode->len;\n+}\n+\n+\n+void exec_rcl(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    uint8_t count;\n+    int of = 0, cf = 0;\n+\n+    fetch_operands(cpu, decode, 2, true, true, false);\n+    count = decode->op[1].val & 0x1f;\n+\n+    switch(decode->operand_size) {\n+        case 1:\n+        {\n+            uint8_t op1_8 = decode->op[0].val;\n+            uint8_t res;\n+            count %= 9;\n+            if (!count)\n+                break;\n+\n+            if (1 == count)\n+                res = (op1_8 << 1) | get_CF(cpu);\n+            else\n+                res = (op1_8 << count) | (get_CF(cpu) << (count - 1)) | (op1_8 >> (9 - count));\n+\n+            write_val_ext(cpu, 
decode->op[0].ptr, res, 1);\n+\n+            cf = (op1_8 >> (8 - count)) & 0x01;\n+            of = cf ^ (res >> 7); // of = cf ^ result7\n+            SET_FLAGS_OxxxxC(cpu, of, cf);\n+            break;\n+        }\n+        case 2:\n+        {\n+            uint16_t res;\n+            uint16_t op1_16 = decode->op[0].val;\n+\n+            count %= 17;\n+            if (!count)\n+                break;\n+\n+            if (1 == count)\n+                res = (op1_16 << 1) | get_CF(cpu);\n+            else if (count == 16)\n+                res = (get_CF(cpu) << 15) | (op1_16 >> 1);\n+            else  // 2..15\n+                res = (op1_16 << count) | (get_CF(cpu) << (count - 1)) | (op1_16 >> (17 - count));\n+            \n+            write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+            \n+            cf = (op1_16 >> (16 - count)) & 0x1;\n+            of = cf ^ (res >> 15); // of = cf ^ result15\n+            SET_FLAGS_OxxxxC(cpu, of, cf);\n+            break;\n+        }\n+        case 4:\n+        {\n+            uint32_t res;\n+            uint32_t op1_32 = decode->op[0].val;\n+\n+            if (!count)\n+                break;\n+\n+            if (1 == count)\n+                res = (op1_32 << 1) | get_CF(cpu);\n+            else\n+                res = (op1_32 << count) | (get_CF(cpu) << (count - 1)) | (op1_32 >> (33 - count));\n+\n+            write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+\n+            cf = (op1_32 >> (32 - count)) & 0x1;\n+            of = cf ^ (res >> 31); // of = cf ^ result31\n+            SET_FLAGS_OxxxxC(cpu, of, cf);\n+            break;\n+        }\n+    }\n+    RIP(cpu) += decode->len;\n+}\n+\n+void exec_rcr(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    uint8_t count;\n+    int of = 0, cf = 0;\n+\n+    fetch_operands(cpu, decode, 2, true, true, false);\n+    count = decode->op[1].val & 0x1f;\n+\n+    switch(decode->operand_size) {\n+        case 1:\n+        {\n+            uint8_t op1_8 = decode->op[0].val;\n+            uint8_t res;\n+\n+            count %= 9;\n+            if (!count)\n+                break;\n+            res = (op1_8 >> count) | (get_CF(cpu) << (8 - count)) | (op1_8 << (9 - count));\n+\n+            write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+\n+            cf = (op1_8 >> (count - 1)) & 0x1;\n+            of = (((res << 1) ^ res) >> 7) & 0x1; // of = result6 ^ result7\n+            SET_FLAGS_OxxxxC(cpu, of, cf);\n+            break;\n+        }\n+        case 2:\n+        {\n+            uint16_t op1_16 = decode->op[0].val;\n+            uint16_t res;\n+\n+            count %= 17;\n+            if (!count)\n+                break;\n+            res = (op1_16 >> count) | (get_CF(cpu) << (16 - count)) | (op1_16 << (17 - count));\n+\n+            write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+\n+            cf = (op1_16 >> (count - 1)) & 0x1;\n+            of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; // of = result15 ^ result14\n+            SET_FLAGS_OxxxxC(cpu, of, cf);\n+            break;\n+        }\n+        case 4:\n+        {\n+            uint32_t res;\n+            uint32_t op1_32 = decode->op[0].val;\n+\n+            if (!count)\n+                break;\n+ \n+            if (1 == count)\n+                res = (op1_32 >> 1) | (get_CF(cpu) << 31);\n+            else\n+                res = (op1_32 >> count) | (get_CF(cpu) << (32 - count)) | (op1_32 << (33 - count));\n+\n+            write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+\n+            cf = (op1_32 >> (count - 1)) & 0x1;\n+       
     of = ((res << 1) ^ res) >> 31; // of = result30 ^ result31\n+            SET_FLAGS_OxxxxC(cpu, of, cf);\n+            break;\n+        }\n+    }\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_xchg(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    fetch_operands(cpu, decode, 2, true, true, false);\n+\n+    write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val, decode->operand_size);\n+    write_val_ext(cpu, decode->op[1].ptr, decode->op[0].val, decode->operand_size);\n+\n+    RIP(cpu) += decode->len;\n+}\n+\n+static void exec_xadd(struct CPUState *cpu, struct x86_decode *decode)\n+{\n+    EXEC_2OP_ARITH_CMD(cpu, decode, +, SET_FLAGS_OSZAPC_ADD, true);\n+    write_val_ext(cpu, decode->op[1].ptr, decode->op[0].val, decode->operand_size);\n+\n+    RIP(cpu) += decode->len;\n+}\n+\n+static struct cmd_handler {\n+    enum x86_decode_cmd cmd;\n+    void (*handler)(struct CPUState *cpu, struct x86_decode *ins);\n+} handlers[] = {\n+    {X86_DECODE_CMD_INVL, NULL,},\n+    {X86_DECODE_CMD_MOV, exec_mov},\n+    {X86_DECODE_CMD_ADD, exec_add},\n+    {X86_DECODE_CMD_OR, exec_or},\n+    {X86_DECODE_CMD_ADC, exec_adc},\n+    {X86_DECODE_CMD_SBB, exec_sbb},\n+    {X86_DECODE_CMD_AND, exec_and},\n+    {X86_DECODE_CMD_SUB, exec_sub},\n+    {X86_DECODE_CMD_NEG, exec_neg},\n+    {X86_DECODE_CMD_XOR, exec_xor},\n+    {X86_DECODE_CMD_CMP, exec_cmp},\n+    {X86_DECODE_CMD_INC, exec_inc},\n+    {X86_DECODE_CMD_DEC, exec_dec},\n+    {X86_DECODE_CMD_TST, exec_tst},\n+    {X86_DECODE_CMD_NOT, exec_not},\n+    {X86_DECODE_CMD_MOVZX, exec_movzx},\n+    {X86_DECODE_CMD_OUT, exec_out},\n+    {X86_DECODE_CMD_IN, exec_in},\n+    {X86_DECODE_CMD_INS, exec_ins},\n+    {X86_DECODE_CMD_OUTS, exec_outs},\n+    {X86_DECODE_CMD_RDMSR, exec_rdmsr},\n+    {X86_DECODE_CMD_WRMSR, exec_wrmsr},\n+    {X86_DECODE_CMD_BT, exec_bt},\n+    {X86_DECODE_CMD_BTR, exec_btr},\n+    {X86_DECODE_CMD_BTC, exec_btc},\n+    {X86_DECODE_CMD_BTS, exec_bts},\n+    {X86_DECODE_CMD_SHL, exec_shl},\n+    {X86_DECODE_CMD_ROL, exec_rol},\n+    {X86_DECODE_CMD_ROR, exec_ror},\n+    {X86_DECODE_CMD_RCR, exec_rcr},\n+    {X86_DECODE_CMD_RCL, exec_rcl},\n+    /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/\n+    {X86_DECODE_CMD_MOVS, exec_movs},\n+    {X86_DECODE_CMD_CMPS, exec_cmps},\n+    {X86_DECODE_CMD_STOS, exec_stos},\n+    {X86_DECODE_CMD_SCAS, exec_scas},\n+    {X86_DECODE_CMD_LODS, exec_lods},\n+    {X86_DECODE_CMD_MOVSX, exec_movsx},\n+    {X86_DECODE_CMD_XCHG, exec_xchg},\n+    {X86_DECODE_CMD_XADD, exec_xadd},\n+};\n+\n+static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];\n+\n+static void init_cmd_handler(CPUState *cpu)\n+{\n+    int i;\n+    for (i = 0; i < ARRAY_SIZE(handlers); i++)\n+        _cmd_handler[handlers[i].cmd] = handlers[i];\n+}\n+\n+static void print_debug(struct CPUState *cpu)\n+{\n+    printf(\"%llx: eax %llx ebx %llx ecx %llx edx %llx esi %llx edi %llx ebp %llx esp %llx flags %llx\\n\", RIP(cpu), RAX(cpu), RBX(cpu), RCX(cpu), RDX(cpu), RSI(cpu), RDI(cpu), RBP(cpu), RSP(cpu), EFLAGS(cpu));\n+}\n+\n+void load_regs(struct CPUState *cpu)\n+{\n+    int i = 0;\n+    RRX(cpu, REG_RAX) = rreg(cpu->hvf_fd, HV_X86_RAX);\n+    RRX(cpu, REG_RBX) = rreg(cpu->hvf_fd, HV_X86_RBX);\n+    RRX(cpu, REG_RCX) = rreg(cpu->hvf_fd, HV_X86_RCX);\n+    RRX(cpu, REG_RDX) = rreg(cpu->hvf_fd, HV_X86_RDX);\n+    RRX(cpu, REG_RSI) = rreg(cpu->hvf_fd, HV_X86_RSI);\n+    RRX(cpu, REG_RDI) = rreg(cpu->hvf_fd, HV_X86_RDI);\n+    RRX(cpu, REG_RSP) = rreg(cpu->hvf_fd, HV_X86_RSP);\n+    RRX(cpu, REG_RBP) = rreg(cpu->hvf_fd, HV_X86_RBP);\n+   
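 /* assumes HV_X86_RAX + i (i = 8..15) maps to HV_X86_R8..HV_X86_R15 */\n+   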
 for (i = 8; i < 16; i++)\n+        RRX(cpu, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);\n+    \n+    RFLAGS(cpu) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);\n+    rflags_to_lflags(cpu);\n+    RIP(cpu) = rreg(cpu->hvf_fd, HV_X86_RIP);\n+\n+    //print_debug(cpu);\n+}\n+\n+void store_regs(struct CPUState *cpu)\n+{\n+    int i = 0;\n+    wreg(cpu->hvf_fd, HV_X86_RAX, RAX(cpu));\n+    wreg(cpu->hvf_fd, HV_X86_RBX, RBX(cpu));\n+    wreg(cpu->hvf_fd, HV_X86_RCX, RCX(cpu));\n+    wreg(cpu->hvf_fd, HV_X86_RDX, RDX(cpu));\n+    wreg(cpu->hvf_fd, HV_X86_RSI, RSI(cpu));\n+    wreg(cpu->hvf_fd, HV_X86_RDI, RDI(cpu));\n+    wreg(cpu->hvf_fd, HV_X86_RBP, RBP(cpu));\n+    wreg(cpu->hvf_fd, HV_X86_RSP, RSP(cpu));\n+    for (i = 8; i < 16; i++)\n+        wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(cpu, i));\n+    \n+    lflags_to_rflags(cpu);\n+    wreg(cpu->hvf_fd, HV_X86_RFLAGS, RFLAGS(cpu));\n+    macvm_set_rip(cpu, RIP(cpu));\n+\n+    //print_debug(cpu);\n+}\n+\n+bool exec_instruction(struct CPUState *cpu, struct x86_decode *ins)\n+{\n+    //if (hvf_vcpu_id(cpu))\n+    //printf(\"%d, %llx: exec_instruction %s\\n\", hvf_vcpu_id(cpu),  RIP(cpu), decode_cmd_to_string(ins->cmd));\n+    \n+    if (0 && ins->is_fpu) { /* FPU emulation is not wired up yet */\n+        VM_PANIC(\"emulate fpu\\n\");\n+    } else {\n+        if (!_cmd_handler[ins->cmd].handler) {\n+            printf(\"Unimplemented handler (%llx) for %d (%x %x) \\n\", RIP(cpu), ins->cmd, ins->opcode[0],\n+                   ins->opcode_len > 1 ? ins->opcode[1] : 0);\n+            RIP(cpu) += ins->len;\n+            return true;\n+        }\n+        \n+        _cmd_handler[ins->cmd].handler(cpu, ins);\n+    }\n+    return true;\n+}\n+\n+void init_emu(struct CPUState *cpu)\n+{\n+    init_cmd_handler(cpu);\n+}\ndiff --git a/target/i386/hvf-utils/x86_emu.h b/target/i386/hvf-utils/x86_emu.h\nnew file mode 100644\nindex 0000000000..c56b2798fa\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_emu.h\n@@ -0,0 +1,16 @@\n+#ifndef __X86_EMU_H__\n+#define __X86_EMU_H__\n+\n+#include \"x86.h\"\n+#include \"x86_decode.h\"\n+\n+void init_emu(struct CPUState *cpu);\n+bool exec_instruction(struct CPUState *cpu, struct x86_decode *ins);\n+\n+void load_regs(struct CPUState *cpu);\n+void store_regs(struct CPUState *cpu);\n+\n+void simulate_rdmsr(struct CPUState *cpu);\n+void simulate_wrmsr(struct CPUState *cpu);\n+\n+#endif\ndiff --git a/target/i386/hvf-utils/x86_flags.c b/target/i386/hvf-utils/x86_flags.c\nnew file mode 100644\nindex 0000000000..ca876d03dd\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_flags.c\n@@ -0,0 +1,317 @@\n+/////////////////////////////////////////////////////////////////////////\n+//\n+//  Copyright (C) 2001-2012  The Bochs Project\n+//  Copyright (C) 2017 Google Inc.\n+//\n+//  This library is free software; you can redistribute it and/or\n+//  modify it under the terms of the GNU Lesser General Public\n+//  License as published by the Free Software Foundation; either\n+//  version 2 of the License, or (at your option) any later version.\n+//\n+//  This library is distributed in the hope that it will be useful,\n+//  but WITHOUT ANY WARRANTY; without even the implied warranty of\n+//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU\n+//  Lesser General Public License for more details.\n+//\n+//  You should have received a copy of the GNU Lesser General Public\n+//  License along with this library; if not, write to the Free Software\n+//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n+/////////////////////////////////////////////////////////////////////////\n+/*\n+ * flags functions\n+ */\n+\n+#include \"qemu/osdep.h\"\n+#include \"qemu-common.h\"\n+\n+#include \"cpu.h\"\n+#include \"x86_flags.h\"\n+#include \"x86.h\"\n+\n+void SET_FLAGS_OxxxxC(struct CPUState *cpu, uint32_t new_of, uint32_t new_cf)\n+{\n+    uint32_t temp_po = new_of ^ new_cf;\n+    cpu->hvf_x86->lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF);\n+    cpu->hvf_x86->lflags.auxbits |= (temp_po << LF_BIT_PO) | (new_cf << LF_BIT_CF);\n+}\n+\n+void SET_FLAGS_OSZAPC_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff)\n+{\n+    SET_FLAGS_OSZAPC_SUB_32(v1, v2, diff);\n+}\n+\n+void SET_FLAGS_OSZAPC_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff)\n+{\n+    SET_FLAGS_OSZAPC_SUB_16(v1, v2, diff);\n+}\n+\n+void SET_FLAGS_OSZAPC_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff)\n+{\n+    SET_FLAGS_OSZAPC_SUB_8(v1, v2, diff);\n+}\n+\n+void SET_FLAGS_OSZAPC_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff)\n+{\n+    SET_FLAGS_OSZAPC_ADD_32(v1, v2, diff);\n+}\n+\n+void SET_FLAGS_OSZAPC_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff)\n+{\n+    SET_FLAGS_OSZAPC_ADD_16(v1, v2, diff);\n+}\n+\n+void SET_FLAGS_OSZAPC_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff)\n+{\n+    SET_FLAGS_OSZAPC_ADD_8(v1, v2, diff);\n+}\n+\n+void SET_FLAGS_OSZAP_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff)\n+{\n+    SET_FLAGS_OSZAP_SUB_32(v1, v2, diff);\n+}\n+\n+void SET_FLAGS_OSZAP_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff)\n+{\n+    SET_FLAGS_OSZAP_SUB_16(v1, v2, diff);\n+}\n+\n+void SET_FLAGS_OSZAP_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff)\n+{\n+    SET_FLAGS_OSZAP_SUB_8(v1, v2, diff);\n+}\n+\n+void SET_FLAGS_OSZAP_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff)\n+{\n+    SET_FLAGS_OSZAP_ADD_32(v1, v2, diff);\n+}\n+\n+void SET_FLAGS_OSZAP_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff)\n+{\n+    SET_FLAGS_OSZAP_ADD_16(v1, v2, diff);\n+}\n+\n+void SET_FLAGS_OSZAP_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff)\n+{\n+    SET_FLAGS_OSZAP_ADD_8(v1, v2, diff);\n+}\n+\n+\n+void SET_FLAGS_OSZAPC_LOGIC32(struct CPUState *cpu, uint32_t diff)\n+{\n+    SET_FLAGS_OSZAPC_LOGIC_32(diff);\n+}\n+\n+void SET_FLAGS_OSZAPC_LOGIC16(struct CPUState *cpu, uint16_t diff)\n+{\n+    SET_FLAGS_OSZAPC_LOGIC_16(diff);\n+}\n+\n+void SET_FLAGS_OSZAPC_LOGIC8(struct CPUState *cpu, uint8_t diff)\n+{\n+    SET_FLAGS_OSZAPC_LOGIC_8(diff);\n+}\n+\n+void SET_FLAGS_SHR32(struct CPUState *cpu, uint32_t v, int count, uint32_t res)\n+{\n+    int cf = (v >> (count - 1)) & 0x1;\n+    int of = (((res << 1) ^ res) >> 31);\n+\n+    SET_FLAGS_OSZAPC_LOGIC_32(res);\n+    SET_FLAGS_OxxxxC(cpu, of, cf);\n+}\n+\n+void SET_FLAGS_SHR16(struct CPUState *cpu, uint16_t v, int count, uint16_t res)\n+{\n+    int cf = (v >> (count - 1)) & 0x1;\n+    int of = (((res << 1) ^ res) >> 15);\n+\n+    SET_FLAGS_OSZAPC_LOGIC_16(res);\n+    SET_FLAGS_OxxxxC(cpu, of, cf);\n+}\n+\n+void SET_FLAGS_SHR8(struct CPUState *cpu, uint8_t v, int count, uint8_t res)\n+{\n+    int cf = (v >> 
(count - 1)) & 0x1;\n+    int of = (((res << 1) ^ res) >> 7);\n+\n+    SET_FLAGS_OSZAPC_LOGIC_8(res);\n+    SET_FLAGS_OxxxxC(cpu, of, cf);\n+}\n+\n+void SET_FLAGS_SAR32(struct CPUState *cpu, int32_t v, int count, uint32_t res)\n+{\n+    int cf = (v >> (count - 1)) & 0x1;\n+\n+    SET_FLAGS_OSZAPC_LOGIC_32(res);\n+    SET_FLAGS_OxxxxC(cpu, 0, cf);\n+}\n+\n+void SET_FLAGS_SAR16(struct CPUState *cpu, int16_t v, int count, uint16_t res)\n+{\n+    int cf = (v >> (count - 1)) & 0x1;\n+\n+    SET_FLAGS_OSZAPC_LOGIC_16(res);\n+    SET_FLAGS_OxxxxC(cpu, 0, cf);\n+}\n+\n+void SET_FLAGS_SAR8(struct CPUState *cpu, int8_t v, int count, uint8_t res)\n+{\n+    int cf = (v >> (count - 1)) & 0x1;\n+\n+    SET_FLAGS_OSZAPC_LOGIC_8(res);\n+    SET_FLAGS_OxxxxC(cpu, 0, cf);\n+}\n+\n+\n+void SET_FLAGS_SHL32(struct CPUState *cpu, uint32_t v, int count, uint32_t res)\n+{\n+    int of, cf;\n+\n+    cf = (v >> (32 - count)) & 0x1;\n+    of = cf ^ (res >> 31);\n+\n+    SET_FLAGS_OSZAPC_LOGIC_32(res);\n+    SET_FLAGS_OxxxxC(cpu, of, cf);\n+}\n+\n+void SET_FLAGS_SHL16(struct CPUState *cpu, uint16_t v, int count, uint16_t res)\n+{\n+    int of = 0, cf = 0;\n+\n+    if (count <= 16) {\n+        cf = (v >> (16 - count)) & 0x1;\n+        of = cf ^ (res >> 15);\n+    }\n+\n+    SET_FLAGS_OSZAPC_LOGIC_16(res);\n+    SET_FLAGS_OxxxxC(cpu, of, cf);\n+}\n+\n+void SET_FLAGS_SHL8(struct CPUState *cpu, uint8_t v, int count, uint8_t res)\n+{\n+    int of = 0, cf = 0;\n+\n+    if (count <= 8) {\n+        cf = (v >> (8 - count)) & 0x1;\n+        of = cf ^ (res >> 7);\n+    }\n+\n+    SET_FLAGS_OSZAPC_LOGIC_8(res);\n+    SET_FLAGS_OxxxxC(cpu, of, cf);\n+}\n+\n+bool get_PF(struct CPUState *cpu)\n+{\n+    uint32_t temp = (255 & cpu->hvf_x86->lflags.result);\n+    temp = temp ^ (255 & (cpu->hvf_x86->lflags.auxbits >> LF_BIT_PDB));\n+    temp = (temp ^ (temp >> 4)) & 0x0F;\n+    return (0x9669U >> temp) & 1;\n+}\n+\n+void set_PF(struct CPUState *cpu, bool val)\n+{\n+    uint32_t temp = (255 & cpu->hvf_x86->lflags.result) ^ (!val);\n+    cpu->hvf_x86->lflags.auxbits &= ~(LF_MASK_PDB);\n+    cpu->hvf_x86->lflags.auxbits |= (temp << LF_BIT_PDB);\n+}\n+\n+bool _get_OF(struct CPUState *cpu)\n+{\n+    return ((cpu->hvf_x86->lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1;\n+}\n+\n+bool get_OF(struct CPUState *cpu)\n+{\n+    return _get_OF(cpu);\n+}\n+\n+bool _get_CF(struct CPUState *cpu)\n+{\n+    return (cpu->hvf_x86->lflags.auxbits >> LF_BIT_CF) & 1;\n+}\n+\n+bool get_CF(struct CPUState *cpu)\n+{\n+    return _get_CF(cpu);\n+}\n+\n+void set_OF(struct CPUState *cpu, bool val)\n+{\n+    SET_FLAGS_OxxxxC(cpu, val, _get_CF(cpu));\n+}\n+\n+void set_CF(struct CPUState *cpu, bool val)\n+{\n+    SET_FLAGS_OxxxxC(cpu, _get_OF(cpu), (val));\n+}\n+\n+bool get_AF(struct CPUState *cpu)\n+{\n+    return (cpu->hvf_x86->lflags.auxbits >> LF_BIT_AF) & 1;\n+}\n+\n+void set_AF(struct CPUState *cpu, bool val)\n+{\n+    cpu->hvf_x86->lflags.auxbits &= ~(LF_MASK_AF);\n+    cpu->hvf_x86->lflags.auxbits |= (val) << LF_BIT_AF;\n+}\n+\n+bool get_ZF(struct CPUState *cpu)\n+{\n+    return !cpu->hvf_x86->lflags.result;\n+}\n+\n+void set_ZF(struct CPUState *cpu, bool val)\n+{\n+    if (val) {\n+        cpu->hvf_x86->lflags.auxbits ^= (((cpu->hvf_x86->lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);\n+        // merge the parity bits into the Parity Delta Byte\n+        uint32_t temp_pdb = (255 & cpu->hvf_x86->lflags.result);\n+        cpu->hvf_x86->lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);\n+        // now zero the .result value\n+        
cpu->hvf_x86->lflags.result = 0;\n+    } else\n+        cpu->hvf_x86->lflags.result |= (1 << 8);\n+}\n+\n+bool get_SF(struct CPUState *cpu)\n+{\n+    return ((cpu->hvf_x86->lflags.result >> LF_SIGN_BIT) ^ (cpu->hvf_x86->lflags.auxbits >> LF_BIT_SD)) & 1;\n+}\n+\n+void set_SF(struct CPUState *cpu, bool val)\n+{\n+    bool temp_sf = get_SF(cpu);\n+    cpu->hvf_x86->lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD;\n+}\n+\n+void set_OSZAPC(struct CPUState *cpu, uint32_t flags32)\n+{\n+    set_OF(cpu, cpu->hvf_x86->rflags.of);\n+    set_SF(cpu, cpu->hvf_x86->rflags.sf);\n+    set_ZF(cpu, cpu->hvf_x86->rflags.zf);\n+    set_AF(cpu, cpu->hvf_x86->rflags.af);\n+    set_PF(cpu, cpu->hvf_x86->rflags.pf);\n+    set_CF(cpu, cpu->hvf_x86->rflags.cf);\n+}\n+\n+void lflags_to_rflags(struct CPUState *cpu)\n+{\n+    cpu->hvf_x86->rflags.cf = get_CF(cpu);\n+    cpu->hvf_x86->rflags.pf = get_PF(cpu);\n+    cpu->hvf_x86->rflags.af = get_AF(cpu);\n+    cpu->hvf_x86->rflags.zf = get_ZF(cpu);\n+    cpu->hvf_x86->rflags.sf = get_SF(cpu);\n+    cpu->hvf_x86->rflags.of = get_OF(cpu);\n+}\n+\n+void rflags_to_lflags(struct CPUState *cpu)\n+{\n+    cpu->hvf_x86->lflags.auxbits = cpu->hvf_x86->lflags.result = 0;\n+    set_OF(cpu, cpu->hvf_x86->rflags.of);\n+    set_SF(cpu, cpu->hvf_x86->rflags.sf);\n+    set_ZF(cpu, cpu->hvf_x86->rflags.zf);\n+    set_AF(cpu, cpu->hvf_x86->rflags.af);\n+    set_PF(cpu, cpu->hvf_x86->rflags.pf);\n+    set_CF(cpu, cpu->hvf_x86->rflags.cf);\n+}\ndiff --git a/target/i386/hvf-utils/x86_flags.h b/target/i386/hvf-utils/x86_flags.h\nnew file mode 100644\nindex 0000000000..f963f8ad1b\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_flags.h\n@@ -0,0 +1,218 @@\n+/////////////////////////////////////////////////////////////////////////\n+//\n+//  Copyright (C) 2001-2012  The Bochs Project\n+//  Copyright (C) 2017 Google Inc.\n+//\n+//  This library is free software; you can redistribute it and/or\n+//  modify it under the terms of the GNU Lesser General Public\n+//  License as published by the Free Software Foundation; either\n+//  version 2 of the License, or (at your option) any later version.\n+//\n+//  This library is distributed in the hope that it will be useful,\n+//  but WITHOUT ANY WARRANTY; without even the implied warranty of\n+//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU\n+//  Lesser General Public License for more details.\n+//\n+//  You should have received a copy of the GNU Lesser General Public\n+//  License along with this library; if not, write to the Free Software\n+//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n+/////////////////////////////////////////////////////////////////////////\n+/*\n+ * x86 eflags functions\n+ */\n+#ifndef __X86_FLAGS_H__\n+#define __X86_FLAGS_H__\n+\n+#include \"x86_gen.h\"\n+\n+/* this is basically Bochs code: flags are kept lazily as the last result\n+ * plus an aux-carry vector and the individual bits are derived on demand */\n+\n+typedef struct lazy_flags {\n+    addr_t result;\n+    addr_t auxbits;\n+} lazy_flags;\n+\n+#define LF_SIGN_BIT     31\n+\n+#define LF_BIT_SD      (0)          /* lazy Sign Flag Delta            */\n+#define LF_BIT_AF      (3)          /* lazy Adjust flag                */\n+#define LF_BIT_PDB     (8)          /* lazy Parity Delta Byte (8 bits) */\n+#define LF_BIT_CF      (31)         /* lazy Carry Flag                 */\n+#define LF_BIT_PO      (30)         /* lazy Partial Overflow = CF ^ OF */\n+\n+#define LF_MASK_SD     (0x01 << LF_BIT_SD)\n+#define LF_MASK_AF     (0x01 << LF_BIT_AF)\n+#define LF_MASK_PDB    (0xFF << LF_BIT_PDB)\n+#define LF_MASK_CF     (0x01 << LF_BIT_CF)\n+#define LF_MASK_PO     (0x01 << LF_BIT_PO)\n+\n+#define ADD_COUT_VEC(op1, op2, result) \\\n+   (((op1) & (op2)) | (((op1) | (op2)) & (~(result))))\n+\n+#define SUB_COUT_VEC(op1, op2, result) \\\n+   (((~(op1)) & (op2)) | (((~(op1)) ^ (op2)) & (result)))\n+\n+#define GET_ADD_OVERFLOW(op1, op2, result, mask) \\\n+   ((((op1) ^ (result)) & ((op2) ^ (result))) & (mask))\n+\n+// *******************\n+// OSZAPC\n+// *******************\n+\n+/* size, carries, result */\n+#define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \\\n+    addr_t temp = ((lf_carries) & (LF_MASK_AF)) | \\\n+    (((lf_carries) >> (size - 2)) << LF_BIT_PO); \\\n+    cpu->hvf_x86->lflags.result = (addr_t)(int##size##_t)(lf_result); \\\n+    if ((size) == 32) temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \\\n+    else if ((size) == 16) temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \\\n+    else if ((size) == 8)  temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \\\n+    else VM_PANIC(\"unimplemented\");                                                    \\\n+    cpu->hvf_x86->lflags.auxbits = (addr_t)(uint32_t)temp; \\\n+}\n+\n+/* carries, result */\n+#define SET_FLAGS_OSZAPC_8(carries, result) \\\n+    SET_FLAGS_OSZAPC_SIZE(8, carries, result)\n+#define SET_FLAGS_OSZAPC_16(carries, result) \\\n+    SET_FLAGS_OSZAPC_SIZE(16, carries, result)\n+#define SET_FLAGS_OSZAPC_32(carries, result) \\\n+    SET_FLAGS_OSZAPC_SIZE(32, carries, result)\n+\n+/* result */\n+#define SET_FLAGS_OSZAPC_LOGIC_8(result_8) \\\n+    SET_FLAGS_OSZAPC_8(0, (result_8))\n+#define SET_FLAGS_OSZAPC_LOGIC_16(result_16) \\\n+    SET_FLAGS_OSZAPC_16(0, (result_16))\n+#define SET_FLAGS_OSZAPC_LOGIC_32(result_32) \\\n+    SET_FLAGS_OSZAPC_32(0, (result_32))\n+#define SET_FLAGS_OSZAPC_LOGIC_SIZE(size, result) {             \\\n+    if (32 == size) {SET_FLAGS_OSZAPC_LOGIC_32(result);}        \\\n+    else if (16 == size) {SET_FLAGS_OSZAPC_LOGIC_16(result);}   \\\n+    else if (8 == size) {SET_FLAGS_OSZAPC_LOGIC_8(result);}     \\\n+    else VM_PANIC(\"unimplemented\");                            \\\n+}\n+\n+/* op1, op2, result */\n+#define SET_FLAGS_OSZAPC_ADD_8(op1_8, op2_8, sum_8) \\\n+    SET_FLAGS_OSZAPC_8(ADD_COUT_VEC((op1_8), (op2_8), (sum_8)), (sum_8))\n+#define SET_FLAGS_OSZAPC_ADD_16(op1_16, op2_16, sum_16) \\\n+  
  SET_FLAGS_OSZAPC_16(ADD_COUT_VEC((op1_16), (op2_16), (sum_16)), (sum_16))\n+#define SET_FLAGS_OSZAPC_ADD_32(op1_32, op2_32, sum_32) \\\n+    SET_FLAGS_OSZAPC_32(ADD_COUT_VEC((op1_32), (op2_32), (sum_32)), (sum_32))\n+\n+/* op1, op2, result */\n+#define SET_FLAGS_OSZAPC_SUB_8(op1_8, op2_8, diff_8) \\\n+    SET_FLAGS_OSZAPC_8(SUB_COUT_VEC((op1_8), (op2_8), (diff_8)), (diff_8))\n+#define SET_FLAGS_OSZAPC_SUB_16(op1_16, op2_16, diff_16) \\\n+    SET_FLAGS_OSZAPC_16(SUB_COUT_VEC((op1_16), (op2_16), (diff_16)), (diff_16))\n+#define SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32) \\\n+    SET_FLAGS_OSZAPC_32(SUB_COUT_VEC((op1_32), (op2_32), (diff_32)), (diff_32))\n+\n+// *******************\n+// OSZAP\n+// *******************\n+/* size, carries, result */\n+#define SET_FLAGS_OSZAP_SIZE(size, lf_carries, lf_result) { \\\n+    addr_t temp = ((lf_carries) & (LF_MASK_AF)) | \\\n+    (((lf_carries) >> (size - 2)) << LF_BIT_PO); \\\n+    if ((size) == 32) temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \\\n+    else if ((size) == 16) temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \\\n+    else if ((size) == 8)  temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \\\n+    else VM_PANIC(\"unimplemented\");                                                    \\\n+    cpu->hvf_x86->lflags.result = (addr_t)(int##size##_t)(lf_result); \\\n+    addr_t delta_c = (cpu->hvf_x86->lflags.auxbits ^ temp) & LF_MASK_CF; \\\n+    delta_c ^= (delta_c >> 1); \\\n+    cpu->hvf_x86->lflags.auxbits = (addr_t)(uint32_t)(temp ^ delta_c); \\\n+}\n+\n+/* carries, result */\n+#define SET_FLAGS_OSZAP_8(carries, result) \\\n+    SET_FLAGS_OSZAP_SIZE(8, carries, result)\n+#define SET_FLAGS_OSZAP_16(carries, result) \\\n+    SET_FLAGS_OSZAP_SIZE(16, carries, result)\n+#define SET_FLAGS_OSZAP_32(carries, result) \\\n+    SET_FLAGS_OSZAP_SIZE(32, carries, result)\n+\n+/* op1, op2, result */\n+#define SET_FLAGS_OSZAP_ADD_8(op1_8, op2_8, sum_8) \\\n+    SET_FLAGS_OSZAP_8(ADD_COUT_VEC((op1_8), (op2_8), (sum_8)), (sum_8))\n+#define SET_FLAGS_OSZAP_ADD_16(op1_16, op2_16, sum_16) \\\n+    SET_FLAGS_OSZAP_16(ADD_COUT_VEC((op1_16), (op2_16), (sum_16)), (sum_16))\n+#define SET_FLAGS_OSZAP_ADD_32(op1_32, op2_32, sum_32) \\\n+    SET_FLAGS_OSZAP_32(ADD_COUT_VEC((op1_32), (op2_32), (sum_32)), (sum_32))\n+\n+/* op1, op2, result */\n+#define SET_FLAGS_OSZAP_SUB_8(op1_8, op2_8, diff_8) \\\n+    SET_FLAGS_OSZAP_8(SUB_COUT_VEC((op1_8), (op2_8), (diff_8)), (diff_8))\n+#define SET_FLAGS_OSZAP_SUB_16(op1_16, op2_16, diff_16) \\\n+    SET_FLAGS_OSZAP_16(SUB_COUT_VEC((op1_16), (op2_16), (diff_16)), (diff_16))\n+#define SET_FLAGS_OSZAP_SUB_32(op1_32, op2_32, diff_32) \\\n+    SET_FLAGS_OSZAP_32(SUB_COUT_VEC((op1_32), (op2_32), (diff_32)), (diff_32))\n+\n+// *******************\n+// OSZAxC\n+// *******************\n+/* size, result */\n+#define SET_FLAGS_OSZAxC_LOGIC_SIZE(size, lf_result) { \\\n+    bool saved_PF = get_PF(cpu); \\\n+    SET_FLAGS_OSZAPC_SIZE(size, (int##size##_t)(0), lf_result); \\\n+    set_PF(cpu, saved_PF); \\\n+}\n+\n+/* result */\n+#define SET_FLAGS_OSZAxC_LOGIC_32(result_32) \\\n+    SET_FLAGS_OSZAxC_LOGIC_SIZE(32, (result_32))\n+\n+void lflags_to_rflags(struct CPUState *cpu);\n+void rflags_to_lflags(struct CPUState *cpu);\n+\n+bool get_PF(struct CPUState *cpu);\n+void set_PF(struct CPUState *cpu, bool val);\n+bool get_CF(struct CPUState *cpu);\n+void set_CF(struct CPUState *cpu, bool val);\n+bool get_AF(struct CPUState *cpu);\n+void set_AF(struct CPUState *cpu, bool val);\n+bool get_ZF(struct CPUState 
*cpu);\n+void set_ZF(struct CPUState *cpu, bool val);\n+bool get_SF(struct CPUState *cpu);\n+void set_SF(struct CPUState *cpu, bool val);\n+bool get_OF(struct CPUState *cpu);\n+void set_OF(struct CPUState *cpu, bool val);\n+void set_OSZAPC(struct CPUState *cpu, uint32_t flags32);\n+\n+void SET_FLAGS_OxxxxC(struct CPUState *cpu, uint32_t new_of, uint32_t new_cf);\n+\n+void SET_FLAGS_OSZAPC_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff);\n+void SET_FLAGS_OSZAPC_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff);\n+void SET_FLAGS_OSZAPC_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff);\n+\n+void SET_FLAGS_OSZAPC_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff);\n+void SET_FLAGS_OSZAPC_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff);\n+void SET_FLAGS_OSZAPC_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff);\n+\n+void SET_FLAGS_OSZAP_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff);\n+void SET_FLAGS_OSZAP_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff);\n+void SET_FLAGS_OSZAP_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff);\n+\n+void SET_FLAGS_OSZAP_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff);\n+void SET_FLAGS_OSZAP_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff);\n+void SET_FLAGS_OSZAP_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff);\n+\n+void SET_FLAGS_OSZAPC_LOGIC32(struct CPUState *cpu, uint32_t diff);\n+void SET_FLAGS_OSZAPC_LOGIC16(struct CPUState *cpu, uint16_t diff);\n+void SET_FLAGS_OSZAPC_LOGIC8(struct CPUState *cpu, uint8_t diff);\n+\n+void SET_FLAGS_SHR32(struct CPUState *cpu, uint32_t v, int count, uint32_t res);\n+void SET_FLAGS_SHR16(struct CPUState *cpu, uint16_t v, int count, uint16_t res);\n+void SET_FLAGS_SHR8(struct CPUState *cpu, uint8_t v, int count, uint8_t res);\n+\n+void SET_FLAGS_SAR32(struct CPUState *cpu, int32_t v, int count, uint32_t res);\n+void SET_FLAGS_SAR16(struct CPUState *cpu, int16_t v, int count, uint16_t res);\n+void SET_FLAGS_SAR8(struct CPUState *cpu, int8_t v, int count, uint8_t res);\n+\n+void SET_FLAGS_SHL32(struct CPUState *cpu, uint32_t v, int count, uint32_t res);\n+void SET_FLAGS_SHL16(struct CPUState *cpu, uint16_t v, int count, uint16_t res);\n+void SET_FLAGS_SHL8(struct CPUState *cpu, uint8_t v, int count, uint8_t res);\n+\n+#endif /* __X86_FLAGS_H__ */\ndiff --git a/target/i386/hvf-utils/x86_gen.h b/target/i386/hvf-utils/x86_gen.h\nnew file mode 100644\nindex 0000000000..770ee80100\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_gen.h\n@@ -0,0 +1,36 @@\n+#ifndef __X86_GEN_H__\n+#define __X86_GEN_H__\n+\n+#include <stdlib.h>\n+#include <stdio.h>\n+#include \"qemu-common.h\"\n+\n+typedef uint64_t addr_t;\n+\n+#define VM_PANIC(x) {\\\n+    printf(\"%s\\n\", x); \\\n+    abort(); \\\n+}\n+\n+#define VM_PANIC_ON(x) {\\\n+    if (x) { \\\n+        printf(\"%s\\n\", #x); \\\n+        abort(); \\\n+    } \\\n+}\n+\n+#define VM_PANIC_EX(...) {\\\n+    printf(__VA_ARGS__); \\\n+    abort(); \\\n+}\n+\n+#define VM_PANIC_ON_EX(x, ...) 
{\\\n+    if (x) { \\\n+        printf(__VA_ARGS__); \\\n+        abort(); \\\n+    } \\\n+}\n+\n+#define ZERO_INIT(obj) memset((void *) &obj, 0, sizeof(obj))\n+\n+#endif\ndiff --git a/target/i386/hvf-utils/x86_mmu.c b/target/i386/hvf-utils/x86_mmu.c\nnew file mode 100644\nindex 0000000000..00fae735be\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_mmu.c\n@@ -0,0 +1,254 @@\n+/*\n+ * Copyright (C) 2016 Veertu Inc,\n+ * Copyright (C) 2017 Google Inc,\n+ *\n+ * This program is free software; you can redistribute it and/or\n+ * modify it under the terms of the GNU General Public License as\n+ * published by the Free Software Foundation; either version 2 or\n+ * (at your option) version 3 of the License.\n+ *\n+ * This program is distributed in the hope that it will be useful,\n+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\n+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n+ * GNU General Public License for more details.\n+ *\n+ * You should have received a copy of the GNU General Public License along\n+ * with this program; if not, see <http://www.gnu.org/licenses/>.\n+ */\n+#include \"qemu/osdep.h\"\n+\n+#include \"qemu-common.h\"\n+#include \"x86.h\"\n+#include \"x86_mmu.h\"\n+#include \"string.h\"\n+#include \"vmcs.h\"\n+#include \"vmx.h\"\n+\n+#include \"memory.h\"\n+#include \"exec/address-spaces.h\"\n+\n+#define pte_present(pte) (pte & PT_PRESENT)\n+#define pte_write_access(pte) (pte & PT_WRITE)\n+#define pte_user_access(pte) (pte & PT_USER)\n+#define pte_exec_access(pte) (!(pte & PT_NX))\n+\n+#define pte_large_page(pte) (pte & PT_PS)\n+#define pte_global_access(pte) (pte & PT_GLOBAL)\n+\n+#define PAE_CR3_MASK                (~0x1fllu)\n+#define LEGACY_CR3_MASK             (0xffffffff)\n+\n+#define LEGACY_PTE_PAGE_MASK        (0xffffffffllu << 12)\n+#define PAE_PTE_PAGE_MASK           ((-1llu << 12) & ((1llu << 52) - 1))\n+#define PAE_PTE_LARGE_PAGE_MASK     ((-1llu << (21)) & ((1llu << 52) - 1))\n+\n+struct gpt_translation {\n+    addr_t  gva;\n+    addr_t gpa;\n+    int    err_code;\n+    uint64_t pte[5];\n+    bool write_access;\n+    bool user_access;\n+    bool exec_access;\n+};\n+\n+static int gpt_top_level(struct CPUState *cpu, bool pae)\n+{\n+    if (!pae)\n+        return 2;\n+    if (x86_is_long_mode(cpu))\n+        return 4;\n+\n+    return 3;\n+}\n+\n+static inline int gpt_entry(addr_t addr, int level, bool pae)\n+{\n+    int level_shift = pae ? 9 : 10;\n+    return (addr >> (level_shift * (level - 1) + 12)) & ((1 << level_shift) - 1);\n+}\n+\n+static inline int pte_size(bool pae)\n+{\n+    return pae ? 8 : 4;\n+}\n+\n+\n+static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt, int level, bool pae)\n+{\n+    int index;\n+    uint64_t pte = 0;\n+    addr_t page_mask = pae ? 
PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;\n+    addr_t gpa = pt->pte[level] & page_mask;\n+\n+    if (level == 3 && !x86_is_long_mode(cpu))\n+        gpa = pt->pte[level];\n+\n+    index = gpt_entry(pt->gva, level, pae);\n+    address_space_rw(&address_space_memory, gpa + index * pte_size(pae), MEMTXATTRS_UNSPECIFIED, (uint8_t *)&pte, pte_size(pae), 0);\n+\n+    pt->pte[level - 1] = pte;\n+\n+    return true;\n+}\n+\n+/* test page table entry */\n+static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt, int level, bool *is_large, bool pae)\n+{\n+    uint64_t pte = pt->pte[level];\n+    \n+    if (pt->write_access)\n+        pt->err_code |= MMU_PAGE_WT;\n+    if (pt->user_access)\n+        pt->err_code |= MMU_PAGE_US;\n+    if (pt->exec_access)\n+        pt->err_code |= MMU_PAGE_NX;\n+\n+    if (!pte_present(pte)) {\n+        return false;\n+    }\n+    \n+    if (pae && !x86_is_long_mode(cpu) && 2 == level)\n+        goto exit;\n+    \n+    if (1 == level && pte_large_page(pte)) {\n+        pt->err_code |= MMU_PAGE_PT;\n+        *is_large = true;\n+    }\n+    if (!level)\n+        pt->err_code |= MMU_PAGE_PT;\n+        \n+    addr_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);\n+    /* check protection */\n+    if (cr0 & CR0_WP) {\n+        if (pt->write_access && !pte_write_access(pte)) {\n+            return false;\n+        }\n+    }\n+\n+    if (pt->user_access && !pte_user_access(pte)) {\n+        return false;\n+    }\n+\n+    if (pae && pt->exec_access && !pte_exec_access(pte)) {\n+        return false;\n+    }\n+    \n+exit:\n+    /* TODO: check reserved bits */\n+    return true;\n+}\n+\n+static inline uint64_t pse_pte_to_page(uint64_t pte)\n+{\n+    return ((pte & 0x1fe000) << 19) | (pte & 0xffc00000);\n+}\n+\n+static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae)\n+{\n+    VM_PANIC_ON(!pte_large_page(pt->pte[1]))\n+    /* 2Mb large page  */\n+    if (pae)\n+        return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff);\n+    \n+    /* 4Mb large page */\n+    return pse_pte_to_page(pt->pte[1]) | (pt->gva & 0x3fffff);\n+}\n+\n+\n+\n+static bool walk_gpt(struct CPUState *cpu, addr_t addr, int err_code, struct gpt_translation* pt, bool pae)\n+{\n+    int top_level, level;\n+    bool is_large = false;\n+    addr_t cr3 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3);\n+    addr_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;\n+    \n+    memset(pt, 0, sizeof(*pt));\n+    top_level = gpt_top_level(cpu, pae);\n+\n+    
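/* seed the walk with the CR3-addressed top-level table */\n+    pt->pte[top_level] = pae ? 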
(cr3 & PAE_CR3_MASK) : (cr3 & LEGACY_CR3_MASK);\n+    pt->gva = addr;\n+    pt->user_access = (err_code & MMU_PAGE_US);\n+    pt->write_access = (err_code & MMU_PAGE_WT);\n+    pt->exec_access = (err_code & MMU_PAGE_NX);\n+    \n+    for (level = top_level; level > 0; level--) {\n+        get_pt_entry(cpu, pt, level, pae);\n+\n+        if (!test_pt_entry(cpu, pt, level - 1, &is_large, pae)) {\n+            return false;\n+        }\n+\n+        if (is_large)\n+            break;\n+    }\n+\n+    if (!is_large)\n+        pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);\n+    else\n+        pt->gpa = large_page_gpa(pt, pae);\n+\n+    return true;\n+}\n+\n+\n+bool mmu_gva_to_gpa(struct CPUState *cpu, addr_t gva, addr_t *gpa)\n+{\n+    bool res;\n+    struct gpt_translation pt;\n+    int err_code = 0;\n+\n+    if (!x86_is_paging_mode(cpu)) {\n+        *gpa = gva;\n+        return true;\n+    }\n+\n+    res = walk_gpt(cpu, gva, err_code, &pt, x86_is_pae_enabled(cpu));\n+    if (res) {\n+        *gpa = pt.gpa;\n+        return true;\n+    }\n+\n+    return false;\n+}\n+\n+void vmx_write_mem(struct CPUState* cpu, addr_t gva, void *data, int bytes)\n+{\n+    addr_t gpa;\n+\n+    while (bytes > 0) {\n+        // copy page\n+        int copy = MIN(bytes, 0x1000 - (gva & 0xfff));\n+\n+        if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {\n+            VM_PANIC_ON_EX(1, \"%s: mmu_gva_to_gpa %llx failed\\n\", __FUNCTION__, gva);\n+        } else {\n+            address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED, data, copy, 1);\n+        }\n+\n+        bytes -= copy;\n+        gva += copy;\n+        data += copy;\n+    }\n+}\n+\n+void vmx_read_mem(struct CPUState* cpu, void *data, addr_t gva, int bytes)\n+{\n+    addr_t gpa;\n+\n+    while (bytes > 0) {\n+        // copy page\n+        int copy = MIN(bytes, 0x1000 - (gva & 0xfff));\n+\n+        if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {\n+            VM_PANIC_ON_EX(1, \"%s: mmu_gva_to_gpa %llx failed\\n\", __FUNCTION__, gva);\n+        }\n+        address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED, data, copy, 0);\n+\n+        bytes -= copy;\n+        gva += copy;\n+        data += copy;\n+    }\n+}\ndiff --git a/target/i386/hvf-utils/x86_mmu.h b/target/i386/hvf-utils/x86_mmu.h\nnew file mode 100644\nindex 0000000000..c31bf28982\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86_mmu.h\n@@ -0,0 +1,28 @@\n+#ifndef __X86_MMU_H__\n+#define __X86_MMU_H__\n+\n+#include \"x86_gen.h\"\n+\n+#define PT_PRESENT      (1 << 0)\n+#define PT_WRITE        (1 << 1)\n+#define PT_USER         (1 << 2)\n+#define PT_WT           (1 << 3)\n+#define PT_CD           (1 << 4)\n+#define PT_ACCESSED     (1 << 5)\n+#define PT_DIRTY        (1 << 6)\n+#define PT_PS           (1 << 7)\n+#define PT_GLOBAL       (1 << 8)\n+#define PT_NX           (1llu << 63)\n+\n+// error codes\n+#define MMU_PAGE_PT             (1 << 0)\n+#define MMU_PAGE_WT             (1 << 1)\n+#define MMU_PAGE_US             (1 << 2)\n+#define MMU_PAGE_NX             (1 << 3)\n+\n+bool mmu_gva_to_gpa(struct CPUState *cpu, addr_t gva, addr_t *gpa);\n+\n+void vmx_write_mem(struct CPUState* cpu, addr_t gva, void *data, int bytes);\n+void vmx_read_mem(struct CPUState* cpu, void *data, addr_t gva, int bytes);\n+\n+#endif /* __X86_MMU_H__ */\ndiff --git a/target/i386/hvf-utils/x86hvf.c b/target/i386/hvf-utils/x86hvf.c\nnew file mode 100644\nindex 0000000000..d5668df37f\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86hvf.c\n@@ -0,0 +1,501 @@\n+/*\n+ * Copyright (c) 2003-2008 Fabrice Bellard\n+ 
* Copyright (C) 2016 Veertu Inc,\n+ * Copyright (C) 2017 Google Inc,\n+ *\n+ * This program is free software; you can redistribute it and/or\n+ * modify it under the terms of the GNU General Public License as\n+ * published by the Free Software Foundation; either version 2 or\n+ * (at your option) version 3 of the License.\n+ *\n+ * This program is distributed in the hope that it will be useful,\n+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\n+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n+ * GNU General Public License for more details.\n+ *\n+ * You should have received a copy of the GNU General Public License along\n+ * with this program; if not, see <http://www.gnu.org/licenses/>.\n+ */\n+\n+#include \"qemu/osdep.h\"\n+#include \"qemu-common.h\"\n+\n+#include \"x86hvf.h\"\n+#include \"vmx.h\"\n+#include \"vmcs.h\"\n+#include \"cpu.h\"\n+#include \"x86_descr.h\"\n+#include \"x86_decode.h\"\n+\n+#include \"hw/i386/apic_internal.h\"\n+\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <Hypervisor/hv.h>\n+#include <Hypervisor/hv_vmx.h>\n+#include <stdint.h>\n+\n+void hvf_cpu_synchronize_state(struct CPUState* cpu_state);\n+\n+void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg, SegmentCache *qseg, bool is_tr)\n+{\n+    vmx_seg->sel = qseg->selector;\n+    vmx_seg->base = qseg->base;\n+    vmx_seg->limit = qseg->limit;\n+\n+    if (!qseg->selector && !x86_is_real(cpu) && !is_tr) {\n+        // the TR register is usable after processor reset despite having a null selector\n+        vmx_seg->ar = 1 << 16;\n+        return;\n+    }\n+    vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf;\n+    vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15;\n+    vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14;\n+    vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13;\n+    vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12;\n+    vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7;\n+    vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5;\n+    vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4;\n+}\n+\n+void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)\n+{\n+    qseg->limit = vmx_seg->limit;\n+    qseg->base = vmx_seg->base;\n+    qseg->selector = vmx_seg->sel;\n+    qseg->flags = ((vmx_seg->ar & 0xf) << DESC_TYPE_SHIFT) |\n+                  (((vmx_seg->ar >> 4) & 1) << DESC_S_SHIFT) |\n+                  (((vmx_seg->ar >> 5) & 3) << DESC_DPL_SHIFT) |\n+                  (((vmx_seg->ar >> 7) & 1) << DESC_P_SHIFT) |\n+                  (((vmx_seg->ar >> 12) & 1) << DESC_AVL_SHIFT) |\n+                  (((vmx_seg->ar >> 13) & 1) << DESC_L_SHIFT) |\n+                  (((vmx_seg->ar >> 14) & 1) << DESC_B_SHIFT) |\n+                  (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT);\n+}\n+\n+void hvf_put_xsave(CPUState *cpu_state)\n+{\n+\n+    int x;\n+    struct hvf_xsave_buf *xsave;\n+    \n+    xsave = X86_CPU(cpu_state)->env.kvm_xsave_buf;\n+    memset(xsave, 0, sizeof(*xsave)); \n+    \n+    memcpy(&xsave->data[4], &X86_CPU(cpu_state)->env.fpdp, sizeof(X86_CPU(cpu_state)->env.fpdp));\n+    memcpy(&xsave->data[2], &X86_CPU(cpu_state)->env.fpip, sizeof(X86_CPU(cpu_state)->env.fpip));\n+    memcpy(&xsave->data[8], &X86_CPU(cpu_state)->env.fpregs, sizeof(X86_CPU(cpu_state)->env.fpregs));\n+    memcpy(&xsave->data[144], &X86_CPU(cpu_state)->env.ymmh_regs, sizeof(X86_CPU(cpu_state)->env.ymmh_regs));\n+    memcpy(&xsave->data[288], &X86_CPU(cpu_state)->env.zmmh_regs, 
sizeof(X86_CPU(cpu_state)->env.zmmh_regs));\n+    memcpy(&xsave->data[272], &X86_CPU(cpu_state)->env.opmask_regs, sizeof(X86_CPU(cpu_state)->env.opmask_regs));\n+    memcpy(&xsave->data[240], &X86_CPU(cpu_state)->env.bnd_regs, sizeof(X86_CPU(cpu_state)->env.bnd_regs));\n+    memcpy(&xsave->data[256], &X86_CPU(cpu_state)->env.bndcs_regs, sizeof(X86_CPU(cpu_state)->env.bndcs_regs));\n+    memcpy(&xsave->data[416], &X86_CPU(cpu_state)->env.hi16_zmm_regs, sizeof(X86_CPU(cpu_state)->env.hi16_zmm_regs));\n+    \n+    xsave->data[0] = (uint16_t)X86_CPU(cpu_state)->env.fpuc;\n+    xsave->data[0] |= (X86_CPU(cpu_state)->env.fpus << 16);\n+    xsave->data[0] |= (X86_CPU(cpu_state)->env.fpstt & 7) << 11;\n+    \n+    for (x = 0; x < 8; ++x)\n+        xsave->data[1] |= ((!X86_CPU(cpu_state)->env.fptags[x]) << x);\n+    xsave->data[1] |= (uint32_t)(X86_CPU(cpu_state)->env.fpop << 16);\n+    \n+    memcpy(&xsave->data[40], &X86_CPU(cpu_state)->env.xmm_regs, sizeof(X86_CPU(cpu_state)->env.xmm_regs));\n+    \n+    xsave->data[6] = X86_CPU(cpu_state)->env.mxcsr;\n+    *(uint64_t *)&xsave->data[128] = X86_CPU(cpu_state)->env.xstate_bv;\n+    \n+    if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, xsave->data, 4096)){\n+        abort();\n+    }\n+}\n+\n+void vmx_update_tpr(CPUState *cpu);\n+void hvf_put_segments(CPUState *cpu_state)\n+{\n+    CPUX86State *env = &X86_CPU(cpu_state)->env;\n+    struct vmx_segment seg;\n+    \n+    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);\n+    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);\n+\n+    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);\n+    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);\n+\n+    //wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]);\n+    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);\n+    vmx_update_tpr(cpu_state);\n+    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);\n+\n+    macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);\n+    macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);\n+\n+    hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);\n+    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_CS);\n+    \n+    hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);\n+    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_DS);\n+\n+    hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false);\n+    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_ES);\n+\n+    hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false);\n+    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_SS);\n+\n+    hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false);\n+    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_FS);\n+\n+    hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false);\n+    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_GS);\n+\n+    hvf_set_segment(cpu_state, &seg, &env->tr, true);\n+    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_TR);\n+\n+    hvf_set_segment(cpu_state, &seg, &env->ldt, false);\n+    vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_LDTR);\n+    \n+    hv_vcpu_flush(cpu_state->hvf_fd);\n+}\n+    \n+void hvf_put_msrs(CPUState *cpu_state)\n+{\n+    CPUX86State *env = &X86_CPU(cpu_state)->env;\n+\n+    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, env->sysenter_cs);\n+    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);\n+    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);\n+\n+    hv_vcpu_write_msr(cpu_state->hvf_fd, 
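/* syscall MSRs */ 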
MSR_STAR, env->star);\n+\n+#ifdef TARGET_X86_64\n+    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);\n+    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);\n+    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);\n+    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);\n+#endif\n+\n+    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);\n+    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);\n+\n+    // if (!osx_is_sierra())\n+    //     wvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET, env->tsc - rdtscp());\n+    hv_vm_sync_tsc(env->tsc);\n+}\n+\n+\n+void hvf_get_xsave(CPUState *cpu_state)\n+{\n+    int x;\n+    struct hvf_xsave_buf *xsave;\n+    \n+    xsave = X86_CPU(cpu_state)->env.kvm_xsave_buf;\n+    \n+    if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, xsave->data, 4096)) {\n+        abort();\n+    }\n+\n+    memcpy(&X86_CPU(cpu_state)->env.fpdp, &xsave->data[4], sizeof(X86_CPU(cpu_state)->env.fpdp));\n+    memcpy(&X86_CPU(cpu_state)->env.fpip, &xsave->data[2], sizeof(X86_CPU(cpu_state)->env.fpip));\n+    memcpy(&X86_CPU(cpu_state)->env.fpregs, &xsave->data[8], sizeof(X86_CPU(cpu_state)->env.fpregs));\n+    memcpy(&X86_CPU(cpu_state)->env.ymmh_regs, &xsave->data[144], sizeof(X86_CPU(cpu_state)->env.ymmh_regs));\n+    memcpy(&X86_CPU(cpu_state)->env.zmmh_regs, &xsave->data[288], sizeof(X86_CPU(cpu_state)->env.zmmh_regs));\n+    memcpy(&X86_CPU(cpu_state)->env.opmask_regs, &xsave->data[272], sizeof(X86_CPU(cpu_state)->env.opmask_regs));\n+    memcpy(&X86_CPU(cpu_state)->env.bnd_regs, &xsave->data[240], sizeof(X86_CPU(cpu_state)->env.bnd_regs));\n+    memcpy(&X86_CPU(cpu_state)->env.bndcs_regs, &xsave->data[256], sizeof(X86_CPU(cpu_state)->env.bndcs_regs));\n+    memcpy(&X86_CPU(cpu_state)->env.hi16_zmm_regs, &xsave->data[416], sizeof(X86_CPU(cpu_state)->env.hi16_zmm_regs));\n+    \n+    \n+    X86_CPU(cpu_state)->env.fpuc = (uint16_t)xsave->data[0];\n+    X86_CPU(cpu_state)->env.fpus = (uint16_t)(xsave->data[0] >> 16);\n+    X86_CPU(cpu_state)->env.fpstt = (X86_CPU(cpu_state)->env.fpus >> 11) & 7;\n+    X86_CPU(cpu_state)->env.fpop = (uint16_t)(xsave->data[1] >> 16);\n+    \n+    for (x = 0; x < 8; ++x)\n+       X86_CPU(cpu_state)->env.fptags[x] =\n+        ((((uint16_t)xsave->data[1] >> x) & 1) == 0);\n+    \n+    memcpy(&X86_CPU(cpu_state)->env.xmm_regs, &xsave->data[40], sizeof(X86_CPU(cpu_state)->env.xmm_regs));\n+\n+    X86_CPU(cpu_state)->env.mxcsr = xsave->data[6];\n+    X86_CPU(cpu_state)->env.xstate_bv = *(uint64_t *)&xsave->data[128];\n+}\n+\n+void hvf_get_segments(CPUState *cpu_state)\n+{\n+    CPUX86State *env = &X86_CPU(cpu_state)->env;\n+\n+    struct vmx_segment seg;\n+\n+    env->interrupt_injected = -1;\n+\n+    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_CS);\n+    hvf_get_segment(&env->segs[R_CS], &seg);\n+    \n+    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_DS);\n+    hvf_get_segment(&env->segs[R_DS], &seg);\n+\n+    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_ES);\n+    hvf_get_segment(&env->segs[R_ES], &seg);\n+\n+    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_FS);\n+    hvf_get_segment(&env->segs[R_FS], &seg);\n+\n+    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_GS);\n+    hvf_get_segment(&env->segs[R_GS], &seg);\n+\n+    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_SS);\n+    hvf_get_segment(&env->segs[R_SS], &seg);\n+\n+    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_TR);\n+    hvf_get_segment(&env->tr, 
&seg);\n+\n+    vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_LDTR);\n+    hvf_get_segment(&env->ldt, &seg);\n+\n+    env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);\n+    env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);\n+    env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);\n+    env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);\n+\n+    env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);\n+    env->cr[2] = 0; /* CR2 is not a VMCS field and is not tracked here yet */\n+    env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);\n+    env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);\n+\n+    env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);\n+}\n+\n+void hvf_get_msrs(CPUState *cpu_state)\n+{\n+    CPUX86State *env = &X86_CPU(cpu_state)->env;\n+    uint64_t tmp;\n+\n+    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);\n+    env->sysenter_cs = tmp;\n+\n+    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);\n+    env->sysenter_esp = tmp;\n+\n+    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);\n+    env->sysenter_eip = tmp;\n+\n+    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);\n+\n+#ifdef TARGET_X86_64\n+    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);\n+    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);\n+    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);\n+    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);\n+#endif\n+\n+    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp); /* value read but currently unused */\n+\n+    env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);\n+}\n+\n+int hvf_put_registers(CPUState *cpu_state)\n+{\n+    X86CPU *x86cpu = X86_CPU(cpu_state);\n+    CPUX86State *env = &x86cpu->env;\n+\n+    wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);\n+    wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);\n+    wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);\n+    wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);\n+    wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);\n+    wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);\n+    wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);\n+    wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);\n+    wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);\n+    wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);\n+    wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);\n+    wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);\n+    wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);\n+    wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);\n+    wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);\n+    wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);\n+    wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);\n+    wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);\n+\n+    wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);\n+\n+    hvf_put_xsave(cpu_state);\n+\n+    hvf_put_segments(cpu_state);\n+\n+    hvf_put_msrs(cpu_state);\n+\n+    wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);\n+    wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);\n+    wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);\n+    wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);\n+    wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);\n+    wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);\n+    wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);\n+    wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);\n+\n+    return 0;\n+}\n+\n+int 
hvf_get_registers(CPUState *cpu_state)\n+{\n+    X86CPU *x86cpu = X86_CPU(cpu_state);\n+    CPUX86State *env = &x86cpu->env;\n+\n+    env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);\n+    env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);\n+    env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);\n+    env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);\n+    env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);\n+    env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);\n+    env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);\n+    env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);\n+    env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);\n+    env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);\n+    env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);\n+    env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);\n+    env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);\n+    env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);\n+    env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);\n+    env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);\n+\n+    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);\n+    env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);\n+\n+    hvf_get_xsave(cpu_state);\n+    env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);\n+\n+    hvf_get_segments(cpu_state);\n+    hvf_get_msrs(cpu_state);\n+\n+    env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);\n+    env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);\n+    env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);\n+    env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);\n+    env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);\n+    env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);\n+    env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);\n+    env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);\n+\n+    return 0;\n+}\n+\n+static void vmx_set_int_window_exiting(CPUState *cpu)\n+{\n+    uint64_t val;\n+    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);\n+    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val | VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);\n+}\n+\n+void vmx_clear_int_window_exiting(CPUState *cpu)\n+{\n+    uint64_t val;\n+    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);\n+    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val & ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);\n+}\n+\n+#define NMI_VEC 2\n+\n+void hvf_inject_interrupts(CPUState *cpu_state)\n+{\n+    X86CPU *x86cpu = X86_CPU(cpu_state);\n+    int allow_nmi = !(rvmcs(cpu_state->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) & VMCS_INTERRUPTIBILITY_NMI_BLOCKING);\n+\n+    uint64_t idt_info = rvmcs(cpu_state->hvf_fd, VMCS_IDT_VECTORING_INFO);\n+    uint64_t info = 0;\n+\n+    if (idt_info & VMCS_IDT_VEC_VALID) {\n+        uint8_t vector = idt_info & 0xff;\n+        uint64_t intr_type = idt_info & VMCS_INTR_T_MASK;\n+        info = idt_info;\n+\n+        uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);\n+        if (intr_type == VMCS_INTR_T_NMI && reason != EXIT_REASON_TASK_SWITCH) {\n+            allow_nmi = 1;\n+            vmx_clear_nmi_blocking(cpu_state);\n+        }\n+\n+        if (allow_nmi || intr_type != VMCS_INTR_T_NMI) {\n+            info &= ~(1 << 12); /* clear undefined bit */\n+            if (intr_type == VMCS_INTR_T_SWINTR ||\n+                intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||\n+                intr_type == VMCS_INTR_T_SWEXCEPTION) {\n+                uint64_t ins_len = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n+         
       wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, ins_len);\n+            }\n+            if (vector == EXCEPTION_BP || vector == EXCEPTION_OF) {\n+                /*\n+                 * VT-x requires #BP and #OF to be injected as software\n+                 * exceptions.\n+                 */\n+                info &= ~VMCS_INTR_T_MASK;\n+                info |= VMCS_INTR_T_SWEXCEPTION;\n+                uint64_t ins_len = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n+                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, ins_len);\n+            }\n+\n+            uint64_t err = 0;\n+            if (idt_info & VMCS_INTR_DEL_ERRCODE) {\n+                err = rvmcs(cpu_state->hvf_fd, VMCS_IDT_VECTORING_ERROR);\n+                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR, err);\n+            }\n+            //printf(\"reinject  %lx err %d\\n\", info, err);\n+            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);\n+        }\n+    }\n+\n+    if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {\n+        if (allow_nmi && !(info & VMCS_INTR_VALID)) {\n+            cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;\n+            info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | NMI_VEC;\n+            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);\n+        } else {\n+            vmx_set_nmi_window_exiting(cpu_state);\n+        }\n+    }\n+\n+    if (cpu_state->hvf_x86->interruptable && (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&\n+        (EFLAGS(cpu_state) & IF_MASK) && !(info & VMCS_INTR_VALID)) {\n+        int line = cpu_get_pic_interrupt(&x86cpu->env);\n+        cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;\n+        if (line >= 0) {\n+            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line | VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);\n+        }\n+    }\n+    if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {\n+        vmx_set_int_window_exiting(cpu_state);\n+    }\n+}\n+\n+int hvf_process_events(CPUState *cpu_state)\n+{\n+    X86CPU *cpu = X86_CPU(cpu_state);\n+    CPUX86State *env = &cpu->env;\n+\n+    EFLAGS(cpu_state) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);\n+\n+    if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {\n+        hvf_cpu_synchronize_state(cpu_state);\n+        do_cpu_init(cpu);\n+    }\n+\n+    if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {\n+        cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;\n+        apic_poll_irq(cpu->apic_state);\n+    }\n+    if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) && (EFLAGS(cpu_state) & IF_MASK)) ||\n+        (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {\n+        cpu_state->halted = 0;\n+    }\n+    if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {\n+        hvf_cpu_synchronize_state(cpu_state);\n+        do_cpu_sipi(cpu);\n+    }\n+    if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {\n+        cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;\n+        hvf_cpu_synchronize_state(cpu_state);\n+        apic_handle_tpr_access_report(cpu->apic_state, env->eip,\n+                                      env->tpr_access_type);\n+    }\n+    return cpu_state->halted;\n+}\n+\ndiff --git a/target/i386/hvf-utils/x86hvf.h b/target/i386/hvf-utils/x86hvf.h\nnew file mode 100644\nindex 0000000000..b4cb4c4d26\n--- /dev/null\n+++ b/target/i386/hvf-utils/x86hvf.h\n@@ -0,0 +1,19 @@\n+#ifndef X86HVF_H\n+#define X86HVF_H\n+#include \"cpu.h\"\n+#include \"x86_descr.h\"\n+\n+int hvf_process_events(CPUState *);\n+int 
hvf_put_registers(CPUState *);\n+int hvf_get_registers(CPUState *);\n+void hvf_inject_interrupts(CPUState *);\n+void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg, SegmentCache *qseg, bool is_tr);\n+void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg);\n+void hvf_put_xsave(CPUState *cpu_state);\n+void hvf_put_segments(CPUState *cpu_state);\n+void hvf_put_msrs(CPUState *cpu_state);\n+void hvf_get_xsave(CPUState *cpu_state);\n+void hvf_get_msrs(CPUState *cpu_state);\n+void vmx_clear_int_window_exiting(CPUState *cpu);\n+void hvf_get_segments(CPUState *cpu_state);\n+#endif\n",
    "prefixes": [
        "02/14"
    ]
}