Patch Detail
get: Show a patch.
patch: Partially update a patch.
put: Update a patch.
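
The three operations above can be exercised directly against this endpoint. What follows is a minimal sketch in Python using the requests library: the base URL and patch ID are taken from this page, while the API token, the maintainer permissions it implies, and the example state value are assumptions added purely for illustration.

import requests

BASE_URL = "http://patchwork.ozlabs.org/api"
PATCH_ID = 806413  # the patch shown on this page

# GET: show a patch. Read access needs no authentication.
resp = requests.get(f"{BASE_URL}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: update a subset of fields, e.g. the patch state.
# Writes require an API token for an account with maintainer rights on
# the project; the token and the new state below are placeholders.
headers = {"Authorization": "Token <your-api-token>"}
resp = requests.patch(
    f"{BASE_URL}/patches/{PATCH_ID}/",
    headers=headers,
    json={"state": "under-review"},
)
resp.raise_for_status()

# PUT behaves like PATCH but expects the full writable representation.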
Example request and response:

GET /api/patches/806413/?format=api
{ "id": 806413, "url": "http://patchwork.ozlabs.org/api/patches/806413/?format=api", "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20170828015654.2530-8-Sergio.G.DelReal@gmail.com/", "project": { "id": 14, "url": "http://patchwork.ozlabs.org/api/projects/14/?format=api", "name": "QEMU Development", "link_name": "qemu-devel", "list_id": "qemu-devel.nongnu.org", "list_email": "qemu-devel@nongnu.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20170828015654.2530-8-Sergio.G.DelReal@gmail.com>", "list_archive_url": null, "date": "2017-08-28T01:56:47", "name": "[07/14] hvf: run hvf code through checkpatch.pl and fix style issues", "commit_ref": null, "pull_url": null, "state": "new", "archived": false, "hash": "595a3a7b296351f45ae94784afc3fab3f5bf358c", "submitter": { "id": 70675, "url": "http://patchwork.ozlabs.org/api/people/70675/?format=api", "name": "Sergio Andres Gomez Del Real", "email": "sergio.g.delreal@gmail.com" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20170828015654.2530-8-Sergio.G.DelReal@gmail.com/mbox/", "series": [ { "id": 56, "url": "http://patchwork.ozlabs.org/api/series/56/?format=api", "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/list/?series=56", "date": "2017-08-28T01:56:40", "name": "add support for Hypervisor.framework in QEMU", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/56/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/806413/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/806413/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>", "X-Original-To": "incoming@patchwork.ozlabs.org", "Delivered-To": "patchwork-incoming@bilbo.ozlabs.org", "Authentication-Results": [ "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=nongnu.org\n\t(client-ip=2001:4830:134:3::11; helo=lists.gnu.org;\n\tenvelope-from=qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n\tunprotected) header.d=gmail.com header.i=@gmail.com\n\theader.b=\"QskqexLz\"; dkim-atps=neutral" ], "Received": [ "from lists.gnu.org (lists.gnu.org [IPv6:2001:4830:134:3::11])\n\t(using TLSv1 with cipher AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3xgjpQ6yClz9ryv\n\tfor <incoming@patchwork.ozlabs.org>;\n\tMon, 28 Aug 2017 17:20:14 +1000 (AEST)", "from localhost ([::1]:37283 helo=lists.gnu.org)\n\tby lists.gnu.org with esmtp (Exim 4.71) (envelope-from\n\t<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>)\n\tid 1dmELD-00005X-Nd\n\tfor incoming@patchwork.ozlabs.org; Mon, 28 Aug 2017 03:20:11 -0400", "from eggs.gnu.org ([2001:4830:134:3::10]:42268)\n\tby lists.gnu.org with esmtp (Exim 4.71)\n\t(envelope-from <sergio.g.delreal@gmail.com>) id 1dm9Jb-0005Qj-PI\n\tfor qemu-devel@nongnu.org; Sun, 27 Aug 2017 21:58:38 -0400", "from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71)\n\t(envelope-from <sergio.g.delreal@gmail.com>) id 1dm9J9-00019e-LB\n\tfor qemu-devel@nongnu.org; Sun, 27 Aug 2017 21:58:11 -0400", "from mail-ua0-x243.google.com ([2607:f8b0:400c:c08::243]:36120)\n\tby eggs.gnu.org with esmtps (TLS1.0:RSA_AES_128_CBC_SHA1:16)\n\t(Exim 4.71) (envelope-from <sergio.g.delreal@gmail.com>)\n\tid 1dm9J9-00018l-2B\n\tfor 
qemu-devel@nongnu.org; Sun, 27 Aug 2017 21:57:43 -0400", "by mail-ua0-x243.google.com with SMTP id b3so2018672uag.3\n\tfor <qemu-devel@nongnu.org>; Sun, 27 Aug 2017 18:57:42 -0700 (PDT)", "from localhost.localdomain ([191.109.6.191])\n\tby smtp.gmail.com with ESMTPSA id\n\ty12sm2696824uad.22.2017.08.27.18.57.32\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tSun, 27 Aug 2017 18:57:39 -0700 (PDT)" ], "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=Q72Ytvru2TJPnwdV86xARiOuxAv96aLqDJ4lcl9Sk3E=;\n\tb=QskqexLzyGlK6Wlfl6vuZIdYTGCSWMB9NXz4+SDR74FqMG92rfotXBcpdP0dcRxWci\n\tvwioRh7AiZ0OhmMM7SA4pXoxxG6RNpHhwQ1eFEHV0Fh3DoKX8GnSECeRxcIyHYeLpqLM\n\tOVZvTEUvQ9wpu48WENWQQ9afUh4hCbiK5zHN47grnKjP7ZaaJkgPdRfwrXYkgY3DDjel\n\tBZNhwM8KiyQX1Q37K17DWN52whMzL3sgE/nl8Wanx1WxrAd9r/faTR7j+LYdGU/Bm7Px\n\t99TCI4nZMpZm1Tc6amcWjp1oZVS6zjEKWAWOKxUvtqa/Y5nSIlY96WXtgjNoOgNmeWlf\n\tTcYg==", "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=Q72Ytvru2TJPnwdV86xARiOuxAv96aLqDJ4lcl9Sk3E=;\n\tb=oxlrLO5KxHaFb74zdoiQoPF+fkSzuyNIZGrY2CrKwMe79bEtfAAUMhHzxjzo3GqYcB\n\tu+sZ9I419X2WwfZ45j1pIJJy7E9uHDBeBHDXbFoV38PsSM9slCRqTsoofq+i2EyvJ/QX\n\tIS0+EyUC4Ts4WxqjneMM3TSEnLJ9KdnfxBvGdBHi9UgdOn6Ozq9UshNoADqXSIr1i8tf\n\t53SOyvV+0uAfxtOSwkT7GQ9XaHtyeBsu5mEU91HTLhACKkwo0J7ayMWQUKazvm8C7GFJ\n\t9JWYO1QbOfAhW9UjtgntseZwSWq6H1yOoACAMweVoADfZ2n+YHHYHHLIY7VxUxAEVEbr\n\tbpAg==", "X-Gm-Message-State": "AHYfb5g8wzY8x1uK2nS4bkp1ePD+BwfqQWgcb8sVQvi12fn3XsY6gdjG\n\tcI+Q8bdde28k+llM", "X-Received": "by 10.176.18.96 with SMTP id s32mr3804443uac.150.1503885460902; \n\tSun, 27 Aug 2017 18:57:40 -0700 (PDT)", "From": "Sergio Andres Gomez Del Real <sergio.g.delreal@gmail.com>", "X-Google-Original-From": "Sergio Andres Gomez Del Real\n\t<Sergio.G.DelReal@gmail.com>", "To": "qemu-devel@nongnu.org", "Date": "Sun, 27 Aug 2017 20:56:47 -0500", "Message-Id": "<20170828015654.2530-8-Sergio.G.DelReal@gmail.com>", "X-Mailer": "git-send-email 2.11.0", "In-Reply-To": "<20170828015654.2530-1-Sergio.G.DelReal@gmail.com>", "References": "<20170828015654.2530-1-Sergio.G.DelReal@gmail.com>", "X-detected-operating-system": "by eggs.gnu.org: Genre and OS details not\n\trecognized.", "X-Received-From": "2607:f8b0:400c:c08::243", "X-Mailman-Approved-At": "Mon, 28 Aug 2017 03:16:06 -0400", "Subject": "[Qemu-devel] [PATCH 07/14] hvf: run hvf code through checkpatch.pl\n\tand fix style issues", "X-BeenThere": "qemu-devel@nongnu.org", "X-Mailman-Version": "2.1.21", "Precedence": "list", "List-Id": "<qemu-devel.nongnu.org>", "List-Unsubscribe": "<https://lists.nongnu.org/mailman/options/qemu-devel>,\n\t<mailto:qemu-devel-request@nongnu.org?subject=unsubscribe>", "List-Archive": "<http://lists.nongnu.org/archive/html/qemu-devel/>", "List-Post": "<mailto:qemu-devel@nongnu.org>", "List-Help": "<mailto:qemu-devel-request@nongnu.org?subject=help>", "List-Subscribe": "<https://lists.nongnu.org/mailman/listinfo/qemu-devel>,\n\t<mailto:qemu-devel-request@nongnu.org?subject=subscribe>", "Cc": "Sergio Andres Gomez Del Real <Sergio.G.DelReal@gmail.com>", "Errors-To": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org", "Sender": "\"Qemu-devel\"\n\t<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>" }, "content": "Signed-off-by: Sergio Andres Gomez Del Real <Sergio.G.DelReal@gmail.com>\n---\n include/sysemu/hvf.h | 22 
+-\n target/i386/hvf-all.c | 793 ++++++------\n target/i386/hvf-i386.h | 2 +-\n target/i386/hvf-utils/vmcs.h | 484 ++++----\n target/i386/hvf-utils/vmx.h | 92 +-\n target/i386/hvf-utils/x86.c | 86 +-\n target/i386/hvf-utils/x86.h | 112 +-\n target/i386/hvf-utils/x86_cpuid.c | 337 ++---\n target/i386/hvf-utils/x86_cpuid.h | 7 +-\n target/i386/hvf-utils/x86_decode.c | 2402 ++++++++++++++++++++++--------------\n target/i386/hvf-utils/x86_decode.h | 30 +-\n target/i386/hvf-utils/x86_descr.h | 29 +-\n target/i386/hvf-utils/x86_emu.c | 1337 ++++++++++----------\n target/i386/hvf-utils/x86_emu.h | 15 +\n target/i386/hvf-utils/x86_flags.c | 52 +-\n target/i386/hvf-utils/x86_flags.h | 101 +-\n target/i386/hvf-utils/x86_mmu.c | 85 +-\n target/i386/hvf-utils/x86_mmu.h | 6 +-\n target/i386/hvf-utils/x86hvf.c | 106 +-\n target/i386/hvf-utils/x86hvf.h | 5 +-\n 20 files changed, 3437 insertions(+), 2666 deletions(-)", "diff": "diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h\nindex 5e2b5f8f76..f9a5a9c5d3 100644\n--- a/include/sysemu/hvf.h\n+++ b/include/sysemu/hvf.h\n@@ -49,7 +49,6 @@ struct hvf_vcpu_caps {\n uint64_t vmx_cap_preemption_timer;\n };\n \n-int __hvf_set_memory(hvf_slot *);\n void hvf_set_phys_mem(MemoryRegionSection *, bool);\n void hvf_handle_io(CPUArchState *, uint16_t, void *,\n int, int, int);\n@@ -58,16 +57,16 @@ hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);\n /* Returns 1 if HVF is available and enabled, 0 otherwise. */\n int hvf_enabled(void);\n \n-/* Disable HVF if |disable| is 1, otherwise, enable it iff it is supported by the host CPU.\n- * Use hvf_enabled() after this to get the result. */\n+/* Disable HVF if |disable| is 1, otherwise, enable it iff it is supported by\n+ * the host CPU. Use hvf_enabled() after this to get the result. */\n void hvf_disable(int disable);\n \n-/* Returns non-0 if the host CPU supports the VMX \"unrestricted guest\" feature which\n- * allows the virtual CPU to directly run in \"real mode\". If true, this allows QEMU to run\n- * several vCPU threads in parallel (see cpus.c). Otherwise, only a a single TCG thread\n- * can run, and it will call HVF to run the current instructions, except in case of\n- * \"real mode\" (paging disabled, typically at boot time), or MMIO operations. */\n-// int hvf_ug_platform(void); does not apply to HVF; assume we must be in UG mode\n+/* Returns non-0 if the host CPU supports the VMX \"unrestricted guest\" feature\n+ * which allows the virtual CPU to directly run in \"real mode\". If true, this\n+ * allows QEMU to run several vCPU threads in parallel (see cpus.c). Otherwise,\n+ * only a a single TCG thread can run, and it will call HVF to run the current\n+ * instructions, except in case of \"real mode\" (paging disabled, typically at\n+ * boot time), or MMIO operations. 
*/\n \n int hvf_sync_vcpus(void);\n \n@@ -81,13 +80,12 @@ void _hvf_cpu_synchronize_post_init(CPUState *, run_on_cpu_data);\n \n void hvf_vcpu_destroy(CPUState *);\n void hvf_raise_event(CPUState *);\n-// void hvf_reset_vcpu_state(void *opaque);\n+/* void hvf_reset_vcpu_state(void *opaque); */\n void vmx_reset_vcpu(CPUState *);\n-void __hvf_cpu_synchronize_state(CPUState *, run_on_cpu_data);\n-void __hvf_cpu_synchronize_post_reset(CPUState *, run_on_cpu_data);\n void vmx_update_tpr(CPUState *);\n void update_apic_tpr(CPUState *);\n int hvf_put_registers(CPUState *);\n+void vmx_clear_int_window_exiting(CPUState *cpu);\n \n #define TYPE_HVF_ACCEL ACCEL_CLASS_NAME(\"hvf\")\n \ndiff --git a/target/i386/hvf-all.c b/target/i386/hvf-all.c\nindex 06cd8429eb..88b5281975 100644\n--- a/target/i386/hvf-all.c\n+++ b/target/i386/hvf-all.c\n@@ -34,44 +34,47 @@ static int hvf_disabled = 1;\n \n static void assert_hvf_ok(hv_return_t ret)\n {\n- if (ret == HV_SUCCESS)\n+ if (ret == HV_SUCCESS) {\n return;\n+ }\n \n switch (ret) {\n- case HV_ERROR:\n- fprintf(stderr, \"Error: HV_ERROR\\n\");\n- break;\n- case HV_BUSY:\n- fprintf(stderr, \"Error: HV_BUSY\\n\");\n- break;\n- case HV_BAD_ARGUMENT:\n- fprintf(stderr, \"Error: HV_BAD_ARGUMENT\\n\");\n- break;\n- case HV_NO_RESOURCES:\n- fprintf(stderr, \"Error: HV_NO_RESOURCES\\n\");\n- break;\n- case HV_NO_DEVICE:\n- fprintf(stderr, \"Error: HV_NO_DEVICE\\n\");\n- break;\n- case HV_UNSUPPORTED:\n- fprintf(stderr, \"Error: HV_UNSUPPORTED\\n\");\n- break;\n- default:\n- fprintf(stderr, \"Unknown Error\\n\");\n+ case HV_ERROR:\n+ fprintf(stderr, \"Error: HV_ERROR\\n\");\n+ break;\n+ case HV_BUSY:\n+ fprintf(stderr, \"Error: HV_BUSY\\n\");\n+ break;\n+ case HV_BAD_ARGUMENT:\n+ fprintf(stderr, \"Error: HV_BAD_ARGUMENT\\n\");\n+ break;\n+ case HV_NO_RESOURCES:\n+ fprintf(stderr, \"Error: HV_NO_RESOURCES\\n\");\n+ break;\n+ case HV_NO_DEVICE:\n+ fprintf(stderr, \"Error: HV_NO_DEVICE\\n\");\n+ break;\n+ case HV_UNSUPPORTED:\n+ fprintf(stderr, \"Error: HV_UNSUPPORTED\\n\");\n+ break;\n+ default:\n+ fprintf(stderr, \"Unknown Error\\n\");\n }\n \n abort();\n }\n \n-// Memory slots/////////////////////////////////////////////////////////////////\n-\n-hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t end) {\n+/* Memory slots */\n+hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t end)\n+{\n hvf_slot *slot;\n int x;\n for (x = 0; x < hvf_state->num_slots; ++x) {\n slot = &hvf_state->slots[x];\n- if (slot->size && start < (slot->start + slot->size) && end > slot->start)\n+ if (slot->size && start < (slot->start + slot->size) &&\n+ end > slot->start) {\n return slot;\n+ }\n }\n return NULL;\n }\n@@ -84,13 +87,12 @@ struct mac_slot {\n };\n \n struct mac_slot mac_slots[32];\n-#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))\n+#define ALIGN(x, y) (((x) + (y) - 1) & ~((y) - 1))\n \n-int __hvf_set_memory(hvf_slot *slot)\n+static int do_hvf_set_memory(hvf_slot *slot)\n {\n struct mac_slot *macslot;\n hv_memory_flags_t flags;\n- pthread_rwlock_wrlock(&mem_lock);\n hv_return_t ret;\n \n macslot = &mac_slots[slot->slot_id];\n@@ -104,7 +106,6 @@ int __hvf_set_memory(hvf_slot *slot)\n }\n \n if (!slot->size) {\n- pthread_rwlock_unlock(&mem_lock);\n return 0;\n }\n \n@@ -115,16 +116,17 @@ int __hvf_set_memory(hvf_slot *slot)\n macslot->size = slot->size;\n ret = hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size, flags);\n assert_hvf_ok(ret);\n- pthread_rwlock_unlock(&mem_lock);\n return 0;\n }\n \n-void hvf_set_phys_mem(MemoryRegionSection* section, bool add)\n+void 
hvf_set_phys_mem(MemoryRegionSection *section, bool add)\n {\n hvf_slot *mem;\n MemoryRegion *area = section->mr;\n \n- if (!memory_region_is_ram(area)) return;\n+ if (!memory_region_is_ram(area)) {\n+ return;\n+ }\n \n mem = hvf_find_overlap_slot(\n section->offset_within_address_space,\n@@ -132,29 +134,34 @@ void hvf_set_phys_mem(MemoryRegionSection* section, bool add)\n \n if (mem && add) {\n if (mem->size == int128_get64(section->size) &&\n- mem->start == section->offset_within_address_space &&\n- mem->mem == (memory_region_get_ram_ptr(area) + section->offset_within_region))\n- return; // Same region was attempted to register, go away.\n+ mem->start == section->offset_within_address_space &&\n+ mem->mem == (memory_region_get_ram_ptr(area) +\n+ section->offset_within_region)) {\n+ return; /* Same region was attempted to register, go away. */\n+ }\n }\n \n- // Region needs to be reset. set the size to 0 and remap it.\n+ /* Region needs to be reset. set the size to 0 and remap it. */\n if (mem) {\n mem->size = 0;\n- if (__hvf_set_memory(mem)) {\n+ if (do_hvf_set_memory(mem)) {\n fprintf(stderr, \"Failed to reset overlapping slot\\n\");\n abort();\n }\n }\n \n- if (!add) return;\n+ if (!add) {\n+ return;\n+ }\n \n- // Now make a new slot.\n+ /* Now make a new slot. */\n int x;\n \n for (x = 0; x < hvf_state->num_slots; ++x) {\n mem = &hvf_state->slots[x];\n- if (!mem->size)\n+ if (!mem->size) {\n break;\n+ }\n }\n \n if (x == hvf_state->num_slots) {\n@@ -166,36 +173,26 @@ void hvf_set_phys_mem(MemoryRegionSection* section, bool add)\n mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;\n mem->start = section->offset_within_address_space;\n \n- if (__hvf_set_memory(mem)) {\n+ if (do_hvf_set_memory(mem)) {\n fprintf(stderr, \"Error registering new memory slot\\n\");\n abort();\n }\n }\n \n-/* return -1 if no bit is set */\n-static int get_highest_priority_int(uint32_t *tab)\n-{\n- int i;\n- for (i = 7; i >= 0; i--) {\n- if (tab[i] != 0) {\n- return i * 32 + apic_fls_bit(tab[i]);\n- }\n- }\n- return -1;\n-}\n-\n void vmx_update_tpr(CPUState *cpu)\n {\n- // TODO: need integrate APIC handling\n+ /* TODO: need integrate APIC handling */\n X86CPU *x86_cpu = X86_CPU(cpu);\n int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;\n int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);\n \n wreg(cpu->hvf_fd, HV_X86_TPR, tpr);\n- if (irr == -1)\n+ if (irr == -1) {\n wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);\n- else\n- wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 : irr >> 4);\n+ } else {\n+ wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? 
tpr >> 4 :\n+ irr >> 4);\n+ }\n }\n \n void update_apic_tpr(CPUState *cpu)\n@@ -207,7 +204,7 @@ void update_apic_tpr(CPUState *cpu)\n \n #define VECTORING_INFO_VECTOR_MASK 0xff\n \n-// TODO: taskswitch handling\n+/* TODO: taskswitch handling */\n static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)\n {\n /* CR3 and ldt selector are not saved intentionally */\n@@ -247,13 +244,20 @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)\n RSI(cpu) = tss->esi;\n RDI(cpu) = tss->edi;\n \n- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, REG_SEG_LDTR);\n- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, REG_SEG_ES);\n- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, REG_SEG_CS);\n- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, REG_SEG_SS);\n- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, REG_SEG_DS);\n- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, REG_SEG_FS);\n- vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, REG_SEG_GS);\n+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}},\n+ REG_SEG_LDTR);\n+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}},\n+ REG_SEG_ES);\n+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}},\n+ REG_SEG_CS);\n+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}},\n+ REG_SEG_SS);\n+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}},\n+ REG_SEG_DS);\n+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}},\n+ REG_SEG_FS);\n+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}},\n+ REG_SEG_GS);\n \n #if 0\n load_segment(cpu, REG_SEG_LDTR, tss->ldt);\n@@ -266,8 +270,10 @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)\n #endif\n }\n \n-static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,\n- uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)\n+static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel,\n+ x68_segment_selector old_tss_sel,\n+ uint64_t old_tss_base,\n+ struct x86_segment_descriptor *new_desc)\n {\n struct x86_tss_segment32 tss_seg;\n uint32_t new_tss_base = x86_segment_base(new_desc);\n@@ -277,19 +283,22 @@ static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segme\n vmx_read_mem(cpu, &tss_seg, old_tss_base, sizeof(tss_seg));\n save_state_to_tss32(cpu, &tss_seg);\n \n- vmx_write_mem(cpu, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset);\n+ vmx_write_mem(cpu, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset -\n+ eip_offset);\n vmx_read_mem(cpu, &tss_seg, new_tss_base, sizeof(tss_seg));\n \n if (old_tss_sel.sel != 0xffff) {\n tss_seg.prev_tss = old_tss_sel.sel;\n \n- vmx_write_mem(cpu, new_tss_base, &tss_seg.prev_tss, sizeof(tss_seg.prev_tss));\n+ vmx_write_mem(cpu, new_tss_base, &tss_seg.prev_tss,\n+ sizeof(tss_seg.prev_tss));\n }\n load_state_from_tss32(cpu, &tss_seg);\n return 0;\n }\n \n-static void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)\n+static void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,\n+ int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)\n {\n uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);\n if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&\n@@ -320,12 +329,14 @@ static 
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,\n \n dpl = task_gate_desc.dpl;\n x68_segment_selector cs = vmx_read_segment_selector(cpu, REG_SEG_CS);\n- if (tss_sel.rpl > dpl || cs.rpl > dpl)\n- ;//DPRINTF(\"emulate_gp\");\n+ if (tss_sel.rpl > dpl || cs.rpl > dpl) {\n+ VM_PANIC(\"emulate_gp\");\n+ }\n }\n \n desc_limit = x86_segment_limit(&next_tss_desc);\n- if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) {\n+ if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||\n+ desc_limit < 0x2b)) {\n VM_PANIC(\"emulate_ts\");\n }\n \n@@ -334,22 +345,27 @@ static void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,\n x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);\n }\n \n- if (reason == TSR_IRET)\n+ if (reason == TSR_IRET) {\n EFLAGS(cpu) &= ~RFLAGS_NT;\n+ }\n \n- if (reason != TSR_CALL && reason != TSR_IDT_GATE)\n+ if (reason != TSR_CALL && reason != TSR_IDT_GATE) {\n old_tss_sel.sel = 0xffff;\n+ }\n \n if (reason != TSR_IRET) {\n next_tss_desc.type |= (1 << 1); /* set busy flag */\n x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);\n }\n \n- if (next_tss_desc.type & 8)\n- ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);\n- else\n- //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);\n+ if (next_tss_desc.type & 8) {\n+ ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base,\n+ &next_tss_desc);\n+ } else {\n+ /*ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base,\n+ * &next_tss_desc);*/\n VM_PANIC(\"task_switch_16\");\n+ }\n \n macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);\n x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);\n@@ -361,7 +377,7 @@ static void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,\n hv_vcpu_flush(cpu->hvf_fd);\n }\n \n-static void hvf_handle_interrupt(CPUState * cpu, int mask)\n+static void hvf_handle_interrupt(CPUState *cpu, int mask)\n {\n cpu->interrupt_request |= mask;\n if (!qemu_cpu_is_self(cpu)) {\n@@ -369,7 +385,7 @@ static void hvf_handle_interrupt(CPUState * cpu, int mask)\n }\n }\n \n-void hvf_handle_io(CPUArchState * env, uint16_t port, void* buffer,\n+void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,\n int direction, int size, int count)\n {\n int i;\n@@ -382,24 +398,26 @@ void hvf_handle_io(CPUArchState * env, uint16_t port, void* buffer,\n ptr += size;\n }\n }\n-//\n-// TODO: synchronize vcpu state\n-void __hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)\n+\n+/* TODO: synchronize vcpu state */\n+static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)\n {\n- CPUState *cpu_state = cpu;//(CPUState *)data;\n- if (cpu_state->hvf_vcpu_dirty == 0)\n+ CPUState *cpu_state = cpu;\n+ if (cpu_state->hvf_vcpu_dirty == 0) {\n hvf_get_registers(cpu_state);\n+ }\n \n cpu_state->hvf_vcpu_dirty = 1;\n }\n \n void hvf_cpu_synchronize_state(CPUState *cpu_state)\n {\n- if (cpu_state->hvf_vcpu_dirty == 0)\n- run_on_cpu(cpu_state, __hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);\n+ if (cpu_state->hvf_vcpu_dirty == 0) {\n+ run_on_cpu(cpu_state, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);\n+ }\n }\n \n-void __hvf_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)\n+static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)\n {\n CPUState *cpu_state = cpu;\n hvf_put_registers(cpu_state);\n@@ -408,7 +426,7 @@ void 
__hvf_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)\n \n void hvf_cpu_synchronize_post_reset(CPUState *cpu_state)\n {\n- run_on_cpu(cpu_state, __hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);\n+ run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);\n }\n \n void _hvf_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)\n@@ -422,44 +440,45 @@ void hvf_cpu_synchronize_post_init(CPUState *cpu_state)\n {\n run_on_cpu(cpu_state, _hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);\n }\n- \n-// TODO: ept fault handlig\n-void vmx_clear_int_window_exiting(CPUState *cpu);\n+\n+/* TODO: ept fault handlig */\n static bool ept_emulation_fault(uint64_t ept_qual)\n {\n-\tint read, write;\n-\n-\t/* EPT fault on an instruction fetch doesn't make sense here */\n-\tif (ept_qual & EPT_VIOLATION_INST_FETCH)\n-\t\treturn false;\n-\n-\t/* EPT fault must be a read fault or a write fault */\n-\tread = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;\n-\twrite = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;\n-\tif ((read | write) == 0)\n-\t\treturn false;\n-\n-\t/*\n-\t * The EPT violation must have been caused by accessing a\n-\t * guest-physical address that is a translation of a guest-linear\n-\t * address.\n-\t */\n-\tif ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||\n-\t (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {\n-\t\treturn false;\n-\t}\n-\n-\treturn true;\n+ int read, write;\n+\n+ /* EPT fault on an instruction fetch doesn't make sense here */\n+ if (ept_qual & EPT_VIOLATION_INST_FETCH) {\n+ return false;\n+ }\n+\n+ /* EPT fault must be a read fault or a write fault */\n+ read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;\n+ write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;\n+ if ((read | write) == 0) {\n+ return false;\n+ }\n+\n+ /*\n+ * The EPT violation must have been caused by accessing a\n+ * guest-physical address that is a translation of a guest-linear\n+ * address.\n+ */\n+ if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||\n+ (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {\n+ return false;\n+ }\n+\n+ return true;\n }\n \n-static void hvf_region_add(MemoryListener * listener,\n- MemoryRegionSection * section)\n+static void hvf_region_add(MemoryListener *listener,\n+ MemoryRegionSection *section)\n {\n hvf_set_phys_mem(section, true);\n }\n \n-static void hvf_region_del(MemoryListener * listener,\n- MemoryRegionSection * section)\n+static void hvf_region_del(MemoryListener *listener,\n+ MemoryRegionSection *section)\n {\n hvf_set_phys_mem(section, false);\n }\n@@ -470,70 +489,69 @@ static MemoryListener hvf_memory_listener = {\n .region_del = hvf_region_del,\n };\n \n-static MemoryListener hvf_io_listener = {\n- .priority = 10,\n-};\n-\n void vmx_reset_vcpu(CPUState *cpu) {\n \n+ /* TODO: this shouldn't be needed; there is already a call to\n+ * cpu_synchronize_all_post_reset in vl.c\n+ */\n wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, 0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, 0);\n macvm_set_cr0(cpu->hvf_fd, 0x60000010);\n-\n+ \n wvmcs(cpu->hvf_fd, VMCS_CR4_MASK, CR4_VMXE_MASK);\n wvmcs(cpu->hvf_fd, VMCS_CR4_SHADOW, 0x0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_CR4, CR4_VMXE_MASK);\n-\n- // set VMCS guest state fields\n+ \n+ /* set VMCS guest state fields */\n wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_SELECTOR, 0xf000);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_LIMIT, 0xffff);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_ACCESS_RIGHTS, 0x9b);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_BASE, 0xffff0000);\n-\n+ \n wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_SELECTOR, 0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_LIMIT, 
0xffff);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_ACCESS_RIGHTS, 0x93);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_BASE, 0);\n-\n+ \n wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_SELECTOR, 0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_LIMIT, 0xffff);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_ACCESS_RIGHTS, 0x93);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_BASE, 0);\n-\n+ \n wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_SELECTOR, 0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_LIMIT, 0xffff);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_ACCESS_RIGHTS, 0x93);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, 0);\n-\n+ \n wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_SELECTOR, 0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_LIMIT, 0xffff);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_ACCESS_RIGHTS, 0x93);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, 0);\n-\n+ \n wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_SELECTOR, 0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_LIMIT, 0xffff);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_ACCESS_RIGHTS, 0x93);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_BASE, 0);\n-\n+ \n wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_SELECTOR, 0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT, 0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x10000);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE, 0);\n-\n+ \n wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_SELECTOR, 0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_LIMIT, 0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_ACCESS_RIGHTS, 0x83);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_BASE, 0);\n-\n+ \n wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT, 0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE, 0);\n-\n+ \n wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT, 0);\n wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE, 0);\n-\n- //wvmcs(cpu->hvf_fd, VMCS_GUEST_CR2, 0x0);\n+ \n+ /*wvmcs(cpu->hvf_fd, VMCS_GUEST_CR2, 0x0);*/\n wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, 0x0);\n-\n+ \n wreg(cpu->hvf_fd, HV_X86_RIP, 0xfff0);\n wreg(cpu->hvf_fd, HV_X86_RDX, 0x623);\n wreg(cpu->hvf_fd, HV_X86_RFLAGS, 0x2);\n@@ -544,9 +562,10 @@ void vmx_reset_vcpu(CPUState *cpu) {\n wreg(cpu->hvf_fd, HV_X86_RSI, 0x0);\n wreg(cpu->hvf_fd, HV_X86_RDI, 0x0);\n wreg(cpu->hvf_fd, HV_X86_RBP, 0x0);\n-\n- for (int i = 0; i < 8; i++)\n- wreg(cpu->hvf_fd, HV_X86_R8+i, 0x0);\n+ \n+ for (int i = 0; i < 8; i++) {\n+ wreg(cpu->hvf_fd, HV_X86_R8 + i, 0x0);\n+ }\n \n hv_vm_sync_tsc(0);\n cpu->halted = 0;\n@@ -554,7 +573,7 @@ void vmx_reset_vcpu(CPUState *cpu) {\n hv_vcpu_flush(cpu->hvf_fd);\n }\n \n-void hvf_vcpu_destroy(CPUState* cpu) \n+void hvf_vcpu_destroy(CPUState *cpu)\n {\n hv_return_t ret = hv_vcpu_destroy((hv_vcpuid_t)cpu->hvf_fd);\n assert_hvf_ok(ret);\n@@ -564,11 +583,12 @@ static void dummy_signal(int sig)\n {\n }\n \n-int hvf_init_vcpu(CPUState * cpu) {\n+int hvf_init_vcpu(CPUState *cpu)\n+{\n \n X86CPU *x86cpu;\n- \n- // init cpu signals\n+\n+ /* init cpu signals */\n sigset_t set;\n struct sigaction sigact;\n \n@@ -584,42 +604,55 @@ int hvf_init_vcpu(CPUState * cpu) {\n init_decoder(cpu);\n init_cpuid(cpu);\n \n- cpu->hvf_caps = (struct hvf_vcpu_caps*)g_malloc0(sizeof(struct hvf_vcpu_caps));\n- cpu->hvf_x86 = (struct hvf_x86_state*)g_malloc0(sizeof(struct hvf_x86_state));\n+ cpu->hvf_caps = (struct hvf_vcpu_caps *)g_malloc0(sizeof(struct hvf_vcpu_caps));\n+ cpu->hvf_x86 = (struct hvf_x86_state *)g_malloc0(sizeof(struct hvf_x86_state));\n \n r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);\n cpu->hvf_vcpu_dirty = 1;\n assert_hvf_ok(r);\n \n-\tif (hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &cpu->hvf_caps->vmx_cap_pinbased))\n-\t\tabort();\n-\tif (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &cpu->hvf_caps->vmx_cap_procbased))\n-\t\tabort();\n-\tif 
(hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cpu->hvf_caps->vmx_cap_procbased2))\n-\t\tabort();\n-\tif (hv_vmx_read_capability(HV_VMX_CAP_ENTRY, &cpu->hvf_caps->vmx_cap_entry))\n-\t\tabort();\n-\n-\t/* set VMCS control fields */\n- wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS, cap2ctrl(cpu->hvf_caps->vmx_cap_pinbased, 0));\n- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, cap2ctrl(cpu->hvf_caps->vmx_cap_procbased,\n- VMCS_PRI_PROC_BASED_CTLS_HLT |\n- VMCS_PRI_PROC_BASED_CTLS_MWAIT |\n- VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |\n- VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |\n- VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);\n-\twvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,\n- cap2ctrl(cpu->hvf_caps->vmx_cap_procbased2,VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));\n-\n-\twvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, cap2ctrl(cpu->hvf_caps->vmx_cap_entry, 0));\n-\twvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */\n+ if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,\n+ &cpu->hvf_caps->vmx_cap_pinbased)) {\n+ abort();\n+ }\n+ if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,\n+ &cpu->hvf_caps->vmx_cap_procbased)) {\n+ abort();\n+ }\n+ if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,\n+ &cpu->hvf_caps->vmx_cap_procbased2)) {\n+ abort();\n+ }\n+ if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,\n+ &cpu->hvf_caps->vmx_cap_entry)) {\n+ abort();\n+ }\n+\n+ /* set VMCS control fields */\n+ wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS,\n+ cap2ctrl(cpu->hvf_caps->vmx_cap_pinbased, 0));\n+ wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,\n+ cap2ctrl(cpu->hvf_caps->vmx_cap_procbased,\n+ VMCS_PRI_PROC_BASED_CTLS_HLT |\n+ VMCS_PRI_PROC_BASED_CTLS_MWAIT |\n+ VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |\n+ VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |\n+ VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);\n+ wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,\n+ cap2ctrl(cpu->hvf_caps->vmx_cap_procbased2,\n+ VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));\n+\n+ wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, cap2ctrl(cpu->hvf_caps->vmx_cap_entry,\n+ 0));\n+ wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */\n \n wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);\n \n vmx_reset_vcpu(cpu);\n \n x86cpu = X86_CPU(cpu);\n- x86cpu->env.kvm_xsave_buf = qemu_memalign(4096, sizeof(struct hvf_xsave_buf));\n+ x86cpu->env.kvm_xsave_buf = qemu_memalign(4096,\n+ sizeof(struct hvf_xsave_buf));\n \n hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);\n hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);\n@@ -629,7 +662,7 @@ int hvf_init_vcpu(CPUState * cpu) {\n hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);\n hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);\n hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);\n- //hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);\n+ /*hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);*/\n hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);\n hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);\n hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);\n@@ -637,12 +670,18 @@ int hvf_init_vcpu(CPUState * cpu) {\n return 0;\n }\n \n-int hvf_enabled() { return !hvf_disabled; }\n-void hvf_disable(int shouldDisable) {\n+int hvf_enabled()\n+{\n+ return !hvf_disabled;\n+}\n+\n+void hvf_disable(int shouldDisable)\n+{\n hvf_disabled = shouldDisable;\n }\n \n-int hvf_vcpu_exec(CPUState* cpu) {\n+int hvf_vcpu_exec(CPUState *cpu)\n+{\n X86CPU *x86_cpu = X86_CPU(cpu);\n CPUX86State *env = &x86_cpu->env;\n int ret = 0;\n@@ -662,7 +701,8 @@ int hvf_vcpu_exec(CPUState* cpu) {\n \n cpu->hvf_x86->interruptable =\n 
!(rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &\n- (VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));\n+ (VMCS_INTERRUPTIBILITY_STI_BLOCKING |\n+ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));\n \n hvf_inject_interrupts(cpu);\n vmx_update_tpr(cpu);\n@@ -680,14 +720,13 @@ int hvf_vcpu_exec(CPUState* cpu) {\n /* handle VMEXIT */\n uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);\n uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);\n- uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n+ uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd,\n+ VMCS_EXIT_INSTRUCTION_LENGTH);\n uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);\n rip = rreg(cpu->hvf_fd, HV_X86_RIP);\n RFLAGS(cpu) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);\n env->eflags = RFLAGS(cpu);\n \n- trace_hvf_vm_exit(exit_reason, exit_qual);\n-\n qemu_mutex_lock_iothread();\n \n update_apic_tpr(cpu);\n@@ -695,239 +734,226 @@ int hvf_vcpu_exec(CPUState* cpu) {\n \n ret = 0;\n switch (exit_reason) {\n- case EXIT_REASON_HLT: {\n- macvm_set_rip(cpu, rip + ins_len);\n- if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && (EFLAGS(cpu) & IF_MASK))\n- && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&\n- !(idtvec_info & VMCS_IDT_VEC_VALID)) {\n- cpu->halted = 1;\n- ret = EXCP_HLT;\n- }\n- ret = EXCP_INTERRUPT;\n- break;\n- }\n- case EXIT_REASON_MWAIT: {\n- ret = EXCP_INTERRUPT;\n- break;\n+ case EXIT_REASON_HLT: {\n+ macvm_set_rip(cpu, rip + ins_len);\n+ if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&\n+ (EFLAGS(cpu) & IF_MASK))\n+ && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&\n+ !(idtvec_info & VMCS_IDT_VEC_VALID)) {\n+ cpu->halted = 1;\n+ ret = EXCP_HLT;\n }\n- /* Need to check if MMIO or unmmaped fault */\n- case EXIT_REASON_EPT_FAULT:\n- {\n- hvf_slot *slot;\n- addr_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);\n- trace_hvf_vm_exit_gpa(gpa);\n-\n- if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && (exit_qual & EXIT_QUAL_NMIUDTI) != 0)\n- vmx_set_nmi_blocking(cpu);\n-\n- slot = hvf_find_overlap_slot(gpa, gpa);\n- // mmio\n- if (ept_emulation_fault(exit_qual) && !slot) {\n- struct x86_decode decode;\n-\n- load_regs(cpu);\n- cpu->hvf_x86->fetch_rip = rip;\n-\n- decode_instruction(cpu, &decode);\n- exec_instruction(cpu, &decode);\n- store_regs(cpu);\n- break;\n- }\n-#ifdef DIRTY_VGA_TRACKING\n- if (slot) {\n- bool read = exit_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;\n- bool write = exit_qual & EPT_VIOLATION_DATA_WRITE ? 
1 : 0;\n- if (!read && !write)\n- break;\n- int flags = HV_MEMORY_READ | HV_MEMORY_EXEC;\n- if (write) flags |= HV_MEMORY_WRITE;\n-\n- pthread_rwlock_wrlock(&mem_lock);\n- if (write)\n- mark_slot_page_dirty(slot, gpa);\n- hv_vm_protect(gpa & ~0xfff, 4096, flags);\n- pthread_rwlock_unlock(&mem_lock);\n- }\n-#endif\n- break;\n+ ret = EXCP_INTERRUPT;\n+ break;\n+ }\n+ case EXIT_REASON_MWAIT: {\n+ ret = EXCP_INTERRUPT;\n+ break;\n+ }\n+ /* Need to check if MMIO or unmmaped fault */\n+ case EXIT_REASON_EPT_FAULT:\n+ {\n+ hvf_slot *slot;\n+ addr_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);\n+\n+ if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&\n+ ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {\n+ vmx_set_nmi_blocking(cpu);\n }\n- case EXIT_REASON_INOUT:\n- {\n- uint32_t in = (exit_qual & 8) != 0;\n- uint32_t size = (exit_qual & 7) + 1;\n- uint32_t string = (exit_qual & 16) != 0;\n- uint32_t port = exit_qual >> 16;\n- //uint32_t rep = (exit_qual & 0x20) != 0;\n \n-#if 1\n- if (!string && in) {\n- uint64_t val = 0;\n- load_regs(cpu);\n- hvf_handle_io(env, port, &val, 0, size, 1);\n- if (size == 1) AL(cpu) = val;\n- else if (size == 2) AX(cpu) = val;\n- else if (size == 4) RAX(cpu) = (uint32_t)val;\n- else VM_PANIC(\"size\");\n- RIP(cpu) += ins_len;\n- store_regs(cpu);\n- break;\n- } else if (!string && !in) {\n- RAX(cpu) = rreg(cpu->hvf_fd, HV_X86_RAX);\n- hvf_handle_io(env, port, &RAX(cpu), 1, size, 1);\n- macvm_set_rip(cpu, rip + ins_len);\n- break;\n- }\n-#endif\n+ slot = hvf_find_overlap_slot(gpa, gpa);\n+ /* mmio */\n+ if (ept_emulation_fault(exit_qual) && !slot) {\n struct x86_decode decode;\n \n load_regs(cpu);\n cpu->hvf_x86->fetch_rip = rip;\n \n decode_instruction(cpu, &decode);\n- VM_PANIC_ON(ins_len != decode.len);\n exec_instruction(cpu, &decode);\n store_regs(cpu);\n-\n- break;\n- }\n- case EXIT_REASON_CPUID: {\n- uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);\n- uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);\n- uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);\n- uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);\n-\n- get_cpuid_func(cpu, rax, rcx, &rax, &rbx, &rcx, &rdx);\n-\n- wreg(cpu->hvf_fd, HV_X86_RAX, rax);\n- wreg(cpu->hvf_fd, HV_X86_RBX, rbx);\n- wreg(cpu->hvf_fd, HV_X86_RCX, rcx);\n- wreg(cpu->hvf_fd, HV_X86_RDX, rdx);\n-\n- macvm_set_rip(cpu, rip + ins_len);\n- break;\n- }\n- case EXIT_REASON_XSETBV: {\n- X86CPU *x86_cpu = X86_CPU(cpu);\n- CPUX86State *env = &x86_cpu->env;\n- uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);\n- uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);\n- uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);\n-\n- if (ecx) {\n- macvm_set_rip(cpu, rip + ins_len);\n- break;\n- }\n- env->xcr0 = ((uint64_t)edx << 32) | eax;\n- wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);\n- macvm_set_rip(cpu, rip + ins_len);\n- break;\n- }\n- case EXIT_REASON_INTR_WINDOW:\n- vmx_clear_int_window_exiting(cpu);\n- ret = EXCP_INTERRUPT;\n- break;\n- case EXIT_REASON_NMI_WINDOW:\n- vmx_clear_nmi_window_exiting(cpu);\n- ret = EXCP_INTERRUPT;\n- break;\n- case EXIT_REASON_EXT_INTR:\n- /* force exit and allow io handling */\n- ret = EXCP_INTERRUPT;\n- break;\n- case EXIT_REASON_RDMSR:\n- case EXIT_REASON_WRMSR:\n- {\n- load_regs(cpu);\n- if (exit_reason == EXIT_REASON_RDMSR)\n- simulate_rdmsr(cpu);\n- else\n- simulate_wrmsr(cpu);\n- RIP(cpu) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n- store_regs(cpu);\n break;\n }\n- case EXIT_REASON_CR_ACCESS: {\n- int cr;\n- int reg;\n+#ifdef DIRTY_VGA_TRACKING\n+ /* TODO: 
handle dirty page tracking */\n+#endif\n+ break;\n+ }\n+ case EXIT_REASON_INOUT:\n+ {\n+ uint32_t in = (exit_qual & 8) != 0;\n+ uint32_t size = (exit_qual & 7) + 1;\n+ uint32_t string = (exit_qual & 16) != 0;\n+ uint32_t port = exit_qual >> 16;\n+ /*uint32_t rep = (exit_qual & 0x20) != 0;*/\n \n+#if 1\n+ if (!string && in) {\n+ uint64_t val = 0;\n load_regs(cpu);\n- cr = exit_qual & 15;\n- reg = (exit_qual >> 8) & 15;\n-\n- switch (cr) {\n- case 0x0: {\n- macvm_set_cr0(cpu->hvf_fd, RRX(cpu, reg));\n- break;\n- }\n- case 4: {\n- macvm_set_cr4(cpu->hvf_fd, RRX(cpu, reg));\n- break;\n- }\n- case 8: {\n- X86CPU *x86_cpu = X86_CPU(cpu);\n- if (exit_qual & 0x10) {\n- RRX(cpu, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);\n- }\n- else {\n- int tpr = RRX(cpu, reg);\n- cpu_set_apic_tpr(x86_cpu->apic_state, tpr);\n- ret = EXCP_INTERRUPT;\n- }\n- break;\n- }\n- default:\n- fprintf(stderr, \"Unrecognized CR %d\\n\", cr);\n- abort();\n+ hvf_handle_io(env, port, &val, 0, size, 1);\n+ if (size == 1) {\n+ AL(cpu) = val;\n+ } else if (size == 2) {\n+ AX(cpu) = val;\n+ } else if (size == 4) {\n+ RAX(cpu) = (uint32_t)val;\n+ } else {\n+ VM_PANIC(\"size\");\n }\n RIP(cpu) += ins_len;\n store_regs(cpu);\n break;\n+ } else if (!string && !in) {\n+ RAX(cpu) = rreg(cpu->hvf_fd, HV_X86_RAX);\n+ hvf_handle_io(env, port, &RAX(cpu), 1, size, 1);\n+ macvm_set_rip(cpu, rip + ins_len);\n+ break;\n }\n- case EXIT_REASON_APIC_ACCESS: { // TODO\n- struct x86_decode decode;\n+#endif\n+ struct x86_decode decode;\n \n- load_regs(cpu);\n- cpu->hvf_x86->fetch_rip = rip;\n+ load_regs(cpu);\n+ cpu->hvf_x86->fetch_rip = rip;\n \n- decode_instruction(cpu, &decode);\n- exec_instruction(cpu, &decode);\n- store_regs(cpu);\n+ decode_instruction(cpu, &decode);\n+ VM_PANIC_ON(ins_len != decode.len);\n+ exec_instruction(cpu, &decode);\n+ store_regs(cpu);\n+\n+ break;\n+ }\n+ case EXIT_REASON_CPUID: {\n+ uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);\n+ uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);\n+ uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);\n+ uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);\n+\n+ cpu_x86_cpuid(cpu, rax, rcx, &rax, &rbx, &rcx, &rdx);\n+\n+ wreg(cpu->hvf_fd, HV_X86_RAX, rax);\n+ wreg(cpu->hvf_fd, HV_X86_RBX, rbx);\n+ wreg(cpu->hvf_fd, HV_X86_RCX, rcx);\n+ wreg(cpu->hvf_fd, HV_X86_RDX, rdx);\n+\n+ macvm_set_rip(cpu, rip + ins_len);\n+ break;\n+ }\n+ case EXIT_REASON_XSETBV: {\n+ X86CPU *x86_cpu = X86_CPU(cpu);\n+ CPUX86State *env = &x86_cpu->env;\n+ uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);\n+ uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);\n+ uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);\n+\n+ if (ecx) {\n+ macvm_set_rip(cpu, rip + ins_len);\n break;\n }\n- case EXIT_REASON_TPR: {\n- ret = 1;\n- break;\n+ env->xcr0 = ((uint64_t)edx << 32) | eax;\n+ wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);\n+ macvm_set_rip(cpu, rip + ins_len);\n+ break;\n+ }\n+ case EXIT_REASON_INTR_WINDOW:\n+ vmx_clear_int_window_exiting(cpu);\n+ ret = EXCP_INTERRUPT;\n+ break;\n+ case EXIT_REASON_NMI_WINDOW:\n+ vmx_clear_nmi_window_exiting(cpu);\n+ ret = EXCP_INTERRUPT;\n+ break;\n+ case EXIT_REASON_EXT_INTR:\n+ /* force exit and allow io handling */\n+ ret = EXCP_INTERRUPT;\n+ break;\n+ case EXIT_REASON_RDMSR:\n+ case EXIT_REASON_WRMSR:\n+ {\n+ load_regs(cpu);\n+ if (exit_reason == EXIT_REASON_RDMSR) {\n+ simulate_rdmsr(cpu);\n+ } else {\n+ simulate_wrmsr(cpu);\n }\n- case EXIT_REASON_TASK_SWITCH: {\n- uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);\n- 
x68_segment_selector sel = {.sel = exit_qual & 0xffff};\n- vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,\n- vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo & VMCS_INTR_T_MASK);\n+ RIP(cpu) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n+ store_regs(cpu);\n+ break;\n+ }\n+ case EXIT_REASON_CR_ACCESS: {\n+ int cr;\n+ int reg;\n+\n+ load_regs(cpu);\n+ cr = exit_qual & 15;\n+ reg = (exit_qual >> 8) & 15;\n+\n+ switch (cr) {\n+ case 0x0: {\n+ macvm_set_cr0(cpu->hvf_fd, RRX(cpu, reg));\n break;\n }\n- case EXIT_REASON_TRIPLE_FAULT: {\n- //addr_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);\n- qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);\n- usleep(1000 * 100);\n- ret = EXCP_INTERRUPT;\n+ case 4: {\n+ macvm_set_cr4(cpu->hvf_fd, RRX(cpu, reg));\n break;\n }\n- case EXIT_REASON_RDPMC:\n- wreg(cpu->hvf_fd, HV_X86_RAX, 0);\n- wreg(cpu->hvf_fd, HV_X86_RDX, 0);\n- macvm_set_rip(cpu, rip + ins_len);\n- break;\n- case VMX_REASON_VMCALL:\n- // TODO: maybe just take this out?\n- // if (g_hypervisor_iface) {\n- // load_regs(cpu);\n- // g_hypervisor_iface->hypercall_handler(cpu);\n- // RIP(cpu) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n- // store_regs(cpu);\n- // }\n+ case 8: {\n+ X86CPU *x86_cpu = X86_CPU(cpu);\n+ if (exit_qual & 0x10) {\n+ RRX(cpu, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);\n+ } else {\n+ int tpr = RRX(cpu, reg);\n+ cpu_set_apic_tpr(x86_cpu->apic_state, tpr);\n+ ret = EXCP_INTERRUPT;\n+ }\n break;\n+ }\n default:\n- fprintf(stderr, \"%llx: unhandled exit %llx\\n\", rip, exit_reason);\n+ fprintf(stderr, \"Unrecognized CR %d\\n\", cr);\n+ abort();\n+ }\n+ RIP(cpu) += ins_len;\n+ store_regs(cpu);\n+ break;\n+ }\n+ case EXIT_REASON_APIC_ACCESS: { /* TODO */\n+ struct x86_decode decode;\n+\n+ load_regs(cpu);\n+ cpu->hvf_x86->fetch_rip = rip;\n+\n+ decode_instruction(cpu, &decode);\n+ exec_instruction(cpu, &decode);\n+ store_regs(cpu);\n+ break;\n+ }\n+ case EXIT_REASON_TPR: {\n+ ret = 1;\n+ break;\n+ }\n+ case EXIT_REASON_TASK_SWITCH: {\n+ uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);\n+ x68_segment_selector sel = {.sel = exit_qual & 0xffff};\n+ vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,\n+ vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo\n+ & VMCS_INTR_T_MASK);\n+ break;\n+ }\n+ case EXIT_REASON_TRIPLE_FAULT: {\n+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);\n+ ret = EXCP_INTERRUPT;\n+ break;\n+ }\n+ case EXIT_REASON_RDPMC:\n+ wreg(cpu->hvf_fd, HV_X86_RAX, 0);\n+ wreg(cpu->hvf_fd, HV_X86_RDX, 0);\n+ macvm_set_rip(cpu, rip + ins_len);\n+ break;\n+ case VMX_REASON_VMCALL:\n+ /* TODO: inject #GP fault */\n+ break;\n+ default:\n+ fprintf(stderr, \"%llx: unhandled exit %llx\\n\", rip, exit_reason);\n }\n } while (ret == 0);\n \n@@ -946,17 +972,16 @@ static int hvf_accel_init(MachineState *ms)\n assert_hvf_ok(ret);\n \n s = (HVFState *)g_malloc0(sizeof(HVFState));\n- \n+\n s->num_slots = 32;\n for (x = 0; x < s->num_slots; ++x) {\n s->slots[x].size = 0;\n s->slots[x].slot_id = x;\n }\n- \n+\n hvf_state = s;\n cpu_interrupt_handler = hvf_handle_interrupt;\n memory_listener_register(&hvf_memory_listener, &address_space_memory);\n- memory_listener_register(&hvf_io_listener, &address_space_io);\n return 0;\n }\n \ndiff --git a/target/i386/hvf-i386.h b/target/i386/hvf-i386.h\nindex f3f958058a..797718ce34 100644\n--- a/target/i386/hvf-i386.h\n+++ b/target/i386/hvf-i386.h\n@@ -41,7 +41,7 @@ struct hvf_state {\n /* Functions exported to host specific mode */\n \n /* Host 
specific functions */\n-int hvf_inject_interrupt(CPUArchState * env, int vector);\n+int hvf_inject_interrupt(CPUArchState *env, int vector);\n int hvf_vcpu_run(struct hvf_vcpu_state *vcpu);\n #endif\n \ndiff --git a/target/i386/hvf-utils/vmcs.h b/target/i386/hvf-utils/vmcs.h\nindex 6f7ccb361a..c410dcfaaa 100644\n--- a/target/i386/hvf-utils/vmcs.h\n+++ b/target/i386/hvf-utils/vmcs.h\n@@ -27,326 +27,326 @@\n */\n \n #ifndef _VMCS_H_\n-#define\t_VMCS_H_\n+#define _VMCS_H_\n \n #include <Hypervisor/hv.h>\n #include <Hypervisor/hv_vmx.h>\n \n-#define\tVMCS_INITIAL\t\t\t0xffffffffffffffff\n+#define VMCS_INITIAL 0xffffffffffffffff\n \n-#define\tVMCS_IDENT(encoding)\t\t((encoding) | 0x80000000)\n+#define VMCS_IDENT(encoding) ((encoding) | 0x80000000)\n /*\n * VMCS field encodings from Appendix H, Intel Architecture Manual Vol3B.\n */\n-#define\tVMCS_INVALID_ENCODING\t\t0xffffffff\n+#define VMCS_INVALID_ENCODING 0xffffffff\n \n /* 16-bit control fields */\n-#define\tVMCS_VPID\t\t\t0x00000000\n-#define\tVMCS_PIR_VECTOR\t\t\t0x00000002\n+#define VMCS_VPID 0x00000000\n+#define VMCS_PIR_VECTOR 0x00000002\n \n /* 16-bit guest-state fields */\n-#define\tVMCS_GUEST_ES_SELECTOR\t\t0x00000800\n-#define\tVMCS_GUEST_CS_SELECTOR\t\t0x00000802\n-#define\tVMCS_GUEST_SS_SELECTOR\t\t0x00000804\n-#define\tVMCS_GUEST_DS_SELECTOR\t\t0x00000806\n-#define\tVMCS_GUEST_FS_SELECTOR\t\t0x00000808\n-#define\tVMCS_GUEST_GS_SELECTOR\t\t0x0000080A\n-#define\tVMCS_GUEST_LDTR_SELECTOR\t0x0000080C\n-#define\tVMCS_GUEST_TR_SELECTOR\t\t0x0000080E\n-#define\tVMCS_GUEST_INTR_STATUS\t\t0x00000810\n+#define VMCS_GUEST_ES_SELECTOR 0x00000800\n+#define VMCS_GUEST_CS_SELECTOR 0x00000802\n+#define VMCS_GUEST_SS_SELECTOR 0x00000804\n+#define VMCS_GUEST_DS_SELECTOR 0x00000806\n+#define VMCS_GUEST_FS_SELECTOR 0x00000808\n+#define VMCS_GUEST_GS_SELECTOR 0x0000080A\n+#define VMCS_GUEST_LDTR_SELECTOR 0x0000080C\n+#define VMCS_GUEST_TR_SELECTOR 0x0000080E\n+#define VMCS_GUEST_INTR_STATUS 0x00000810\n \n /* 16-bit host-state fields */\n-#define\tVMCS_HOST_ES_SELECTOR\t\t0x00000C00\n-#define\tVMCS_HOST_CS_SELECTOR\t\t0x00000C02\n-#define\tVMCS_HOST_SS_SELECTOR\t\t0x00000C04\n-#define\tVMCS_HOST_DS_SELECTOR\t\t0x00000C06\n-#define\tVMCS_HOST_FS_SELECTOR\t\t0x00000C08\n-#define\tVMCS_HOST_GS_SELECTOR\t\t0x00000C0A\n-#define\tVMCS_HOST_TR_SELECTOR\t\t0x00000C0C\n+#define VMCS_HOST_ES_SELECTOR 0x00000C00\n+#define VMCS_HOST_CS_SELECTOR 0x00000C02\n+#define VMCS_HOST_SS_SELECTOR 0x00000C04\n+#define VMCS_HOST_DS_SELECTOR 0x00000C06\n+#define VMCS_HOST_FS_SELECTOR 0x00000C08\n+#define VMCS_HOST_GS_SELECTOR 0x00000C0A\n+#define VMCS_HOST_TR_SELECTOR 0x00000C0C\n \n /* 64-bit control fields */\n-#define\tVMCS_IO_BITMAP_A\t\t0x00002000\n-#define\tVMCS_IO_BITMAP_B\t\t0x00002002\n-#define\tVMCS_MSR_BITMAP\t\t\t0x00002004\n-#define\tVMCS_EXIT_MSR_STORE\t\t0x00002006\n-#define\tVMCS_EXIT_MSR_LOAD\t\t0x00002008\n-#define\tVMCS_ENTRY_MSR_LOAD\t\t0x0000200A\n-#define\tVMCS_EXECUTIVE_VMCS\t\t0x0000200C\n-#define\tVMCS_TSC_OFFSET\t\t\t0x00002010\n-#define\tVMCS_VIRTUAL_APIC\t\t0x00002012\n-#define\tVMCS_APIC_ACCESS\t\t0x00002014\n-#define\tVMCS_PIR_DESC\t\t\t0x00002016\n-#define\tVMCS_EPTP\t\t\t0x0000201A\n-#define\tVMCS_EOI_EXIT0\t\t\t0x0000201C\n-#define\tVMCS_EOI_EXIT1\t\t\t0x0000201E\n-#define\tVMCS_EOI_EXIT2\t\t\t0x00002020\n-#define\tVMCS_EOI_EXIT3\t\t\t0x00002022\n-#define\tVMCS_EOI_EXIT(vector)\t\t(VMCS_EOI_EXIT0 + ((vector) / 64) * 2)\n+#define VMCS_IO_BITMAP_A 0x00002000\n+#define VMCS_IO_BITMAP_B 0x00002002\n+#define VMCS_MSR_BITMAP 0x00002004\n+#define 
VMCS_EXIT_MSR_STORE 0x00002006\n+#define VMCS_EXIT_MSR_LOAD 0x00002008\n+#define VMCS_ENTRY_MSR_LOAD 0x0000200A\n+#define VMCS_EXECUTIVE_VMCS 0x0000200C\n+#define VMCS_TSC_OFFSET 0x00002010\n+#define VMCS_VIRTUAL_APIC 0x00002012\n+#define VMCS_APIC_ACCESS 0x00002014\n+#define VMCS_PIR_DESC 0x00002016\n+#define VMCS_EPTP 0x0000201A\n+#define VMCS_EOI_EXIT0 0x0000201C\n+#define VMCS_EOI_EXIT1 0x0000201E\n+#define VMCS_EOI_EXIT2 0x00002020\n+#define VMCS_EOI_EXIT3 0x00002022\n+#define VMCS_EOI_EXIT(vector) (VMCS_EOI_EXIT0 + ((vector) / 64) * 2)\n \n /* 64-bit read-only fields */\n-#define\tVMCS_GUEST_PHYSICAL_ADDRESS\t0x00002400\n+#define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400\n \n /* 64-bit guest-state fields */\n-#define\tVMCS_LINK_POINTER\t\t0x00002800\n-#define\tVMCS_GUEST_IA32_DEBUGCTL\t0x00002802\n-#define\tVMCS_GUEST_IA32_PAT\t\t0x00002804\n-#define\tVMCS_GUEST_IA32_EFER\t\t0x00002806\n-#define\tVMCS_GUEST_IA32_PERF_GLOBAL_CTRL 0x00002808\n-#define\tVMCS_GUEST_PDPTE0\t\t0x0000280A\n-#define\tVMCS_GUEST_PDPTE1\t\t0x0000280C\n-#define\tVMCS_GUEST_PDPTE2\t\t0x0000280E\n-#define\tVMCS_GUEST_PDPTE3\t\t0x00002810\n+#define VMCS_LINK_POINTER 0x00002800\n+#define VMCS_GUEST_IA32_DEBUGCTL 0x00002802\n+#define VMCS_GUEST_IA32_PAT 0x00002804\n+#define VMCS_GUEST_IA32_EFER 0x00002806\n+#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL 0x00002808\n+#define VMCS_GUEST_PDPTE0 0x0000280A\n+#define VMCS_GUEST_PDPTE1 0x0000280C\n+#define VMCS_GUEST_PDPTE2 0x0000280E\n+#define VMCS_GUEST_PDPTE3 0x00002810\n \n /* 64-bit host-state fields */\n-#define\tVMCS_HOST_IA32_PAT\t\t0x00002C00\n-#define\tVMCS_HOST_IA32_EFER\t\t0x00002C02\n-#define\tVMCS_HOST_IA32_PERF_GLOBAL_CTRL\t0x00002C04\n+#define VMCS_HOST_IA32_PAT 0x00002C00\n+#define VMCS_HOST_IA32_EFER 0x00002C02\n+#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL 0x00002C04\n \n /* 32-bit control fields */\n-#define\tVMCS_PIN_BASED_CTLS\t\t0x00004000\n-#define\tVMCS_PRI_PROC_BASED_CTLS\t0x00004002\n-#define\tVMCS_EXCEPTION_BITMAP\t\t0x00004004\n-#define\tVMCS_PF_ERROR_MASK\t\t0x00004006\n-#define\tVMCS_PF_ERROR_MATCH\t\t0x00004008\n-#define\tVMCS_CR3_TARGET_COUNT\t\t0x0000400A\n-#define\tVMCS_EXIT_CTLS\t\t\t0x0000400C\n-#define\tVMCS_EXIT_MSR_STORE_COUNT\t0x0000400E\n-#define\tVMCS_EXIT_MSR_LOAD_COUNT\t0x00004010\n-#define\tVMCS_ENTRY_CTLS\t\t\t0x00004012\n-#define\tVMCS_ENTRY_MSR_LOAD_COUNT\t0x00004014\n-#define\tVMCS_ENTRY_INTR_INFO\t\t0x00004016\n-#define\tVMCS_ENTRY_EXCEPTION_ERROR\t0x00004018\n-#define\tVMCS_ENTRY_INST_LENGTH\t\t0x0000401A\n-#define\tVMCS_TPR_THRESHOLD\t\t0x0000401C\n-#define\tVMCS_SEC_PROC_BASED_CTLS\t0x0000401E\n-#define\tVMCS_PLE_GAP\t\t\t0x00004020\n-#define\tVMCS_PLE_WINDOW\t\t\t0x00004022\n+#define VMCS_PIN_BASED_CTLS 0x00004000\n+#define VMCS_PRI_PROC_BASED_CTLS 0x00004002\n+#define VMCS_EXCEPTION_BITMAP 0x00004004\n+#define VMCS_PF_ERROR_MASK 0x00004006\n+#define VMCS_PF_ERROR_MATCH 0x00004008\n+#define VMCS_CR3_TARGET_COUNT 0x0000400A\n+#define VMCS_EXIT_CTLS 0x0000400C\n+#define VMCS_EXIT_MSR_STORE_COUNT 0x0000400E\n+#define VMCS_EXIT_MSR_LOAD_COUNT 0x00004010\n+#define VMCS_ENTRY_CTLS 0x00004012\n+#define VMCS_ENTRY_MSR_LOAD_COUNT 0x00004014\n+#define VMCS_ENTRY_INTR_INFO 0x00004016\n+#define VMCS_ENTRY_EXCEPTION_ERROR 0x00004018\n+#define VMCS_ENTRY_INST_LENGTH 0x0000401A\n+#define VMCS_TPR_THRESHOLD 0x0000401C\n+#define VMCS_SEC_PROC_BASED_CTLS 0x0000401E\n+#define VMCS_PLE_GAP 0x00004020\n+#define VMCS_PLE_WINDOW 0x00004022\n \n /* 32-bit read-only data fields 
*/\n-#define\tVMCS_INSTRUCTION_ERROR\t\t0x00004400\n-#define\tVMCS_EXIT_REASON\t\t0x00004402\n-#define\tVMCS_EXIT_INTR_INFO\t\t0x00004404\n-#define\tVMCS_EXIT_INTR_ERRCODE\t\t0x00004406\n-#define\tVMCS_IDT_VECTORING_INFO\t\t0x00004408\n-#define\tVMCS_IDT_VECTORING_ERROR\t0x0000440A\n-#define\tVMCS_EXIT_INSTRUCTION_LENGTH\t0x0000440C\n-#define\tVMCS_EXIT_INSTRUCTION_INFO\t0x0000440E\n+#define VMCS_INSTRUCTION_ERROR 0x00004400\n+#define VMCS_EXIT_REASON 0x00004402\n+#define VMCS_EXIT_INTR_INFO 0x00004404\n+#define VMCS_EXIT_INTR_ERRCODE 0x00004406\n+#define VMCS_IDT_VECTORING_INFO 0x00004408\n+#define VMCS_IDT_VECTORING_ERROR 0x0000440A\n+#define VMCS_EXIT_INSTRUCTION_LENGTH 0x0000440C\n+#define VMCS_EXIT_INSTRUCTION_INFO 0x0000440E\n \n /* 32-bit guest-state fields */\n-#define\tVMCS_GUEST_ES_LIMIT\t\t0x00004800\n-#define\tVMCS_GUEST_CS_LIMIT\t\t0x00004802\n-#define\tVMCS_GUEST_SS_LIMIT\t\t0x00004804\n-#define\tVMCS_GUEST_DS_LIMIT\t\t0x00004806\n-#define\tVMCS_GUEST_FS_LIMIT\t\t0x00004808\n-#define\tVMCS_GUEST_GS_LIMIT\t\t0x0000480A\n-#define\tVMCS_GUEST_LDTR_LIMIT\t\t0x0000480C\n-#define\tVMCS_GUEST_TR_LIMIT\t\t0x0000480E\n-#define\tVMCS_GUEST_GDTR_LIMIT\t\t0x00004810\n-#define\tVMCS_GUEST_IDTR_LIMIT\t\t0x00004812\n-#define\tVMCS_GUEST_ES_ACCESS_RIGHTS\t0x00004814\n-#define\tVMCS_GUEST_CS_ACCESS_RIGHTS\t0x00004816\n-#define\tVMCS_GUEST_SS_ACCESS_RIGHTS\t0x00004818\n-#define\tVMCS_GUEST_DS_ACCESS_RIGHTS\t0x0000481A\n-#define\tVMCS_GUEST_FS_ACCESS_RIGHTS\t0x0000481C\n-#define\tVMCS_GUEST_GS_ACCESS_RIGHTS\t0x0000481E\n-#define\tVMCS_GUEST_LDTR_ACCESS_RIGHTS\t0x00004820\n-#define\tVMCS_GUEST_TR_ACCESS_RIGHTS\t0x00004822\n-#define\tVMCS_GUEST_INTERRUPTIBILITY\t0x00004824\n-#define\tVMCS_GUEST_ACTIVITY\t\t0x00004826\n-#define VMCS_GUEST_SMBASE\t\t0x00004828\n-#define\tVMCS_GUEST_IA32_SYSENTER_CS\t0x0000482A\n-#define\tVMCS_PREEMPTION_TIMER_VALUE\t0x0000482E\n+#define VMCS_GUEST_ES_LIMIT 0x00004800\n+#define VMCS_GUEST_CS_LIMIT 0x00004802\n+#define VMCS_GUEST_SS_LIMIT 0x00004804\n+#define VMCS_GUEST_DS_LIMIT 0x00004806\n+#define VMCS_GUEST_FS_LIMIT 0x00004808\n+#define VMCS_GUEST_GS_LIMIT 0x0000480A\n+#define VMCS_GUEST_LDTR_LIMIT 0x0000480C\n+#define VMCS_GUEST_TR_LIMIT 0x0000480E\n+#define VMCS_GUEST_GDTR_LIMIT 0x00004810\n+#define VMCS_GUEST_IDTR_LIMIT 0x00004812\n+#define VMCS_GUEST_ES_ACCESS_RIGHTS 0x00004814\n+#define VMCS_GUEST_CS_ACCESS_RIGHTS 0x00004816\n+#define VMCS_GUEST_SS_ACCESS_RIGHTS 0x00004818\n+#define VMCS_GUEST_DS_ACCESS_RIGHTS 0x0000481A\n+#define VMCS_GUEST_FS_ACCESS_RIGHTS 0x0000481C\n+#define VMCS_GUEST_GS_ACCESS_RIGHTS 0x0000481E\n+#define VMCS_GUEST_LDTR_ACCESS_RIGHTS 0x00004820\n+#define VMCS_GUEST_TR_ACCESS_RIGHTS 0x00004822\n+#define VMCS_GUEST_INTERRUPTIBILITY 0x00004824\n+#define VMCS_GUEST_ACTIVITY 0x00004826\n+#define VMCS_GUEST_SMBASE 0x00004828\n+#define VMCS_GUEST_IA32_SYSENTER_CS 0x0000482A\n+#define VMCS_PREEMPTION_TIMER_VALUE 0x0000482E\n \n /* 32-bit host state fields */\n-#define\tVMCS_HOST_IA32_SYSENTER_CS\t0x00004C00\n+#define VMCS_HOST_IA32_SYSENTER_CS 0x00004C00\n \n /* Natural Width control fields */\n-#define\tVMCS_CR0_MASK\t\t\t0x00006000\n-#define\tVMCS_CR4_MASK\t\t\t0x00006002\n-#define\tVMCS_CR0_SHADOW\t\t\t0x00006004\n-#define\tVMCS_CR4_SHADOW\t\t\t0x00006006\n-#define\tVMCS_CR3_TARGET0\t\t0x00006008\n-#define\tVMCS_CR3_TARGET1\t\t0x0000600A\n-#define\tVMCS_CR3_TARGET2\t\t0x0000600C\n-#define\tVMCS_CR3_TARGET3\t\t0x0000600E\n+#define VMCS_CR0_MASK 0x00006000\n+#define VMCS_CR4_MASK 0x00006002\n+#define VMCS_CR0_SHADOW 0x00006004\n+#define 
VMCS_CR4_SHADOW 0x00006006\n+#define VMCS_CR3_TARGET0 0x00006008\n+#define VMCS_CR3_TARGET1 0x0000600A\n+#define VMCS_CR3_TARGET2 0x0000600C\n+#define VMCS_CR3_TARGET3 0x0000600E\n \n /* Natural Width read-only fields */\n-#define\tVMCS_EXIT_QUALIFICATION\t\t0x00006400\n-#define\tVMCS_IO_RCX\t\t\t0x00006402\n-#define\tVMCS_IO_RSI\t\t\t0x00006404\n-#define\tVMCS_IO_RDI\t\t\t0x00006406\n-#define\tVMCS_IO_RIP\t\t\t0x00006408\n-#define\tVMCS_GUEST_LINEAR_ADDRESS\t0x0000640A\n+#define VMCS_EXIT_QUALIFICATION 0x00006400\n+#define VMCS_IO_RCX 0x00006402\n+#define VMCS_IO_RSI 0x00006404\n+#define VMCS_IO_RDI 0x00006406\n+#define VMCS_IO_RIP 0x00006408\n+#define VMCS_GUEST_LINEAR_ADDRESS 0x0000640A\n \n /* Natural Width guest-state fields */\n-#define\tVMCS_GUEST_CR0\t\t\t0x00006800\n-#define\tVMCS_GUEST_CR3\t\t\t0x00006802\n-#define\tVMCS_GUEST_CR4\t\t\t0x00006804\n-#define\tVMCS_GUEST_ES_BASE\t\t0x00006806\n-#define\tVMCS_GUEST_CS_BASE\t\t0x00006808\n-#define\tVMCS_GUEST_SS_BASE\t\t0x0000680A\n-#define\tVMCS_GUEST_DS_BASE\t\t0x0000680C\n-#define\tVMCS_GUEST_FS_BASE\t\t0x0000680E\n-#define\tVMCS_GUEST_GS_BASE\t\t0x00006810\n-#define\tVMCS_GUEST_LDTR_BASE\t\t0x00006812\n-#define\tVMCS_GUEST_TR_BASE\t\t0x00006814\n-#define\tVMCS_GUEST_GDTR_BASE\t\t0x00006816\n-#define\tVMCS_GUEST_IDTR_BASE\t\t0x00006818\n-#define\tVMCS_GUEST_DR7\t\t\t0x0000681A\n-#define\tVMCS_GUEST_RSP\t\t\t0x0000681C\n-#define\tVMCS_GUEST_RIP\t\t\t0x0000681E\n-#define\tVMCS_GUEST_RFLAGS\t\t0x00006820\n-#define\tVMCS_GUEST_PENDING_DBG_EXCEPTIONS 0x00006822\n-#define\tVMCS_GUEST_IA32_SYSENTER_ESP\t0x00006824\n-#define\tVMCS_GUEST_IA32_SYSENTER_EIP\t0x00006826\n+#define VMCS_GUEST_CR0 0x00006800\n+#define VMCS_GUEST_CR3 0x00006802\n+#define VMCS_GUEST_CR4 0x00006804\n+#define VMCS_GUEST_ES_BASE 0x00006806\n+#define VMCS_GUEST_CS_BASE 0x00006808\n+#define VMCS_GUEST_SS_BASE 0x0000680A\n+#define VMCS_GUEST_DS_BASE 0x0000680C\n+#define VMCS_GUEST_FS_BASE 0x0000680E\n+#define VMCS_GUEST_GS_BASE 0x00006810\n+#define VMCS_GUEST_LDTR_BASE 0x00006812\n+#define VMCS_GUEST_TR_BASE 0x00006814\n+#define VMCS_GUEST_GDTR_BASE 0x00006816\n+#define VMCS_GUEST_IDTR_BASE 0x00006818\n+#define VMCS_GUEST_DR7 0x0000681A\n+#define VMCS_GUEST_RSP 0x0000681C\n+#define VMCS_GUEST_RIP 0x0000681E\n+#define VMCS_GUEST_RFLAGS 0x00006820\n+#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS 0x00006822\n+#define VMCS_GUEST_IA32_SYSENTER_ESP 0x00006824\n+#define VMCS_GUEST_IA32_SYSENTER_EIP 0x00006826\n \n /* Natural Width host-state fields */\n-#define\tVMCS_HOST_CR0\t\t\t0x00006C00\n-#define\tVMCS_HOST_CR3\t\t\t0x00006C02\n-#define\tVMCS_HOST_CR4\t\t\t0x00006C04\n-#define\tVMCS_HOST_FS_BASE\t\t0x00006C06\n-#define\tVMCS_HOST_GS_BASE\t\t0x00006C08\n-#define\tVMCS_HOST_TR_BASE\t\t0x00006C0A\n-#define\tVMCS_HOST_GDTR_BASE\t\t0x00006C0C\n-#define\tVMCS_HOST_IDTR_BASE\t\t0x00006C0E\n-#define\tVMCS_HOST_IA32_SYSENTER_ESP\t0x00006C10\n-#define\tVMCS_HOST_IA32_SYSENTER_EIP\t0x00006C12\n-#define\tVMCS_HOST_RSP\t\t\t0x00006C14\n-#define\tVMCS_HOST_RIP\t\t\t0x00006c16\n+#define VMCS_HOST_CR0 0x00006C00\n+#define VMCS_HOST_CR3 0x00006C02\n+#define VMCS_HOST_CR4 0x00006C04\n+#define VMCS_HOST_FS_BASE 0x00006C06\n+#define VMCS_HOST_GS_BASE 0x00006C08\n+#define VMCS_HOST_TR_BASE 0x00006C0A\n+#define VMCS_HOST_GDTR_BASE 0x00006C0C\n+#define VMCS_HOST_IDTR_BASE 0x00006C0E\n+#define VMCS_HOST_IA32_SYSENTER_ESP 0x00006C10\n+#define VMCS_HOST_IA32_SYSENTER_EIP 0x00006C12\n+#define VMCS_HOST_RSP 0x00006C14\n+#define VMCS_HOST_RIP 0x00006c16\n \n /*\n * VM instruction error numbers\n 
*/\n-#define\tVMRESUME_WITH_NON_LAUNCHED_VMCS\t5\n+#define VMRESUME_WITH_NON_LAUNCHED_VMCS 5\n \n /*\n * VMCS exit reasons\n */\n-#define EXIT_REASON_EXCEPTION\t\t0\n-#define EXIT_REASON_EXT_INTR\t\t1\n-#define EXIT_REASON_TRIPLE_FAULT\t2\n-#define EXIT_REASON_INIT\t\t3\n-#define EXIT_REASON_SIPI\t\t4\n-#define EXIT_REASON_IO_SMI\t\t5\n-#define EXIT_REASON_SMI\t\t\t6\n-#define EXIT_REASON_INTR_WINDOW\t\t7\n-#define EXIT_REASON_NMI_WINDOW\t\t8\n-#define EXIT_REASON_TASK_SWITCH\t\t9\n-#define EXIT_REASON_CPUID\t\t10\n-#define EXIT_REASON_GETSEC\t\t11\n-#define EXIT_REASON_HLT\t\t\t12\n-#define EXIT_REASON_INVD\t\t13\n-#define EXIT_REASON_INVLPG\t\t14\n-#define EXIT_REASON_RDPMC\t\t15\n-#define EXIT_REASON_RDTSC\t\t16\n-#define EXIT_REASON_RSM\t\t\t17\n-#define EXIT_REASON_VMCALL\t\t18\n-#define EXIT_REASON_VMCLEAR\t\t19\n-#define EXIT_REASON_VMLAUNCH\t\t20\n-#define EXIT_REASON_VMPTRLD\t\t21\n-#define EXIT_REASON_VMPTRST\t\t22\n-#define EXIT_REASON_VMREAD\t\t23\n-#define EXIT_REASON_VMRESUME\t\t24\n-#define EXIT_REASON_VMWRITE\t\t25\n-#define EXIT_REASON_VMXOFF\t\t26\n-#define EXIT_REASON_VMXON\t\t27\n-#define EXIT_REASON_CR_ACCESS\t\t28\n-#define EXIT_REASON_DR_ACCESS\t\t29\n-#define EXIT_REASON_INOUT\t\t30\n-#define EXIT_REASON_RDMSR\t\t31\n-#define EXIT_REASON_WRMSR\t\t32\n-#define EXIT_REASON_INVAL_VMCS\t\t33\n-#define EXIT_REASON_INVAL_MSR\t\t34\n-#define EXIT_REASON_MWAIT\t\t36\n-#define EXIT_REASON_MTF\t\t\t37\n-#define EXIT_REASON_MONITOR\t\t39\n-#define EXIT_REASON_PAUSE\t\t40\n-#define EXIT_REASON_MCE_DURING_ENTRY\t41\n-#define EXIT_REASON_TPR\t\t\t43\n-#define EXIT_REASON_APIC_ACCESS\t\t44\n-#define\tEXIT_REASON_VIRTUALIZED_EOI\t45\n-#define EXIT_REASON_GDTR_IDTR\t\t46\n-#define EXIT_REASON_LDTR_TR\t\t47\n-#define EXIT_REASON_EPT_FAULT\t\t48\n-#define EXIT_REASON_EPT_MISCONFIG\t49\n-#define EXIT_REASON_INVEPT\t\t50\n-#define EXIT_REASON_RDTSCP\t\t51\n-#define EXIT_REASON_VMX_PREEMPT\t\t52\n-#define EXIT_REASON_INVVPID\t\t53\n-#define EXIT_REASON_WBINVD\t\t54\n-#define EXIT_REASON_XSETBV\t\t55\n-#define\tEXIT_REASON_APIC_WRITE\t\t56\n+#define EXIT_REASON_EXCEPTION 0\n+#define EXIT_REASON_EXT_INTR 1\n+#define EXIT_REASON_TRIPLE_FAULT 2\n+#define EXIT_REASON_INIT 3\n+#define EXIT_REASON_SIPI 4\n+#define EXIT_REASON_IO_SMI 5\n+#define EXIT_REASON_SMI 6\n+#define EXIT_REASON_INTR_WINDOW 7\n+#define EXIT_REASON_NMI_WINDOW 8\n+#define EXIT_REASON_TASK_SWITCH 9\n+#define EXIT_REASON_CPUID 10\n+#define EXIT_REASON_GETSEC 11\n+#define EXIT_REASON_HLT 12\n+#define EXIT_REASON_INVD 13\n+#define EXIT_REASON_INVLPG 14\n+#define EXIT_REASON_RDPMC 15\n+#define EXIT_REASON_RDTSC 16\n+#define EXIT_REASON_RSM 17\n+#define EXIT_REASON_VMCALL 18\n+#define EXIT_REASON_VMCLEAR 19\n+#define EXIT_REASON_VMLAUNCH 20\n+#define EXIT_REASON_VMPTRLD 21\n+#define EXIT_REASON_VMPTRST 22\n+#define EXIT_REASON_VMREAD 23\n+#define EXIT_REASON_VMRESUME 24\n+#define EXIT_REASON_VMWRITE 25\n+#define EXIT_REASON_VMXOFF 26\n+#define EXIT_REASON_VMXON 27\n+#define EXIT_REASON_CR_ACCESS 28\n+#define EXIT_REASON_DR_ACCESS 29\n+#define EXIT_REASON_INOUT 30\n+#define EXIT_REASON_RDMSR 31\n+#define EXIT_REASON_WRMSR 32\n+#define EXIT_REASON_INVAL_VMCS 33\n+#define EXIT_REASON_INVAL_MSR 34\n+#define EXIT_REASON_MWAIT 36\n+#define EXIT_REASON_MTF 37\n+#define EXIT_REASON_MONITOR 39\n+#define EXIT_REASON_PAUSE 40\n+#define EXIT_REASON_MCE_DURING_ENTRY 41\n+#define EXIT_REASON_TPR 43\n+#define EXIT_REASON_APIC_ACCESS 44\n+#define EXIT_REASON_VIRTUALIZED_EOI 45\n+#define EXIT_REASON_GDTR_IDTR 46\n+#define EXIT_REASON_LDTR_TR 
47\n+#define EXIT_REASON_EPT_FAULT 48\n+#define EXIT_REASON_EPT_MISCONFIG 49\n+#define EXIT_REASON_INVEPT 50\n+#define EXIT_REASON_RDTSCP 51\n+#define EXIT_REASON_VMX_PREEMPT 52\n+#define EXIT_REASON_INVVPID 53\n+#define EXIT_REASON_WBINVD 54\n+#define EXIT_REASON_XSETBV 55\n+#define EXIT_REASON_APIC_WRITE 56\n \n /*\n * NMI unblocking due to IRET.\n *\n * Applies to VM-exits due to hardware exception or EPT fault.\n */\n-#define\tEXIT_QUAL_NMIUDTI\t(1 << 12)\n+#define EXIT_QUAL_NMIUDTI (1 << 12)\n /*\n * VMCS interrupt information fields\n */\n-#define\tVMCS_INTR_VALID\t\t(1U << 31)\n-#define\tVMCS_INTR_T_MASK\t0x700\t\t/* Interruption-info type */\n-#define\tVMCS_INTR_T_HWINTR\t(0 << 8)\n-#define\tVMCS_INTR_T_NMI\t\t(2 << 8)\n-#define\tVMCS_INTR_T_HWEXCEPTION\t(3 << 8)\n-#define\tVMCS_INTR_T_SWINTR\t(4 << 8)\n-#define\tVMCS_INTR_T_PRIV_SWEXCEPTION (5 << 8)\n-#define\tVMCS_INTR_T_SWEXCEPTION\t(6 << 8)\n-#define\tVMCS_INTR_DEL_ERRCODE\t(1 << 11)\n+#define VMCS_INTR_VALID (1U << 31)\n+#define VMCS_INTR_T_MASK 0x700 /* Interruption-info type */\n+#define VMCS_INTR_T_HWINTR (0 << 8)\n+#define VMCS_INTR_T_NMI (2 << 8)\n+#define VMCS_INTR_T_HWEXCEPTION (3 << 8)\n+#define VMCS_INTR_T_SWINTR (4 << 8)\n+#define VMCS_INTR_T_PRIV_SWEXCEPTION (5 << 8)\n+#define VMCS_INTR_T_SWEXCEPTION (6 << 8)\n+#define VMCS_INTR_DEL_ERRCODE (1 << 11)\n \n /*\n * VMCS IDT-Vectoring information fields\n */\n-#define\tVMCS_IDT_VEC_VALID (1U << 31)\n-#define\tVMCS_IDT_VEC_TYPE 0x700\n-#define\tVMCS_IDT_VEC_ERRCODE_VALID\t(1U << 11)\n-#define\tVMCS_IDT_VEC_HWINTR (0 << 8)\n-#define\tVMCS_IDT_VEC_NMI (2 << 8)\n-#define\tVMCS_IDT_VEC_HWEXCEPTION\t(3 << 8)\n-#define\tVMCS_IDT_VEC_SWINTR (4 << 8)\n+#define VMCS_IDT_VEC_VALID (1U << 31)\n+#define VMCS_IDT_VEC_TYPE 0x700\n+#define VMCS_IDT_VEC_ERRCODE_VALID (1U << 11)\n+#define VMCS_IDT_VEC_HWINTR (0 << 8)\n+#define VMCS_IDT_VEC_NMI (2 << 8)\n+#define VMCS_IDT_VEC_HWEXCEPTION (3 << 8)\n+#define VMCS_IDT_VEC_SWINTR (4 << 8)\n \n /*\n * VMCS Guest interruptibility field\n */\n-#define\tVMCS_INTERRUPTIBILITY_STI_BLOCKING\t(1 << 0)\n-#define\tVMCS_INTERRUPTIBILITY_MOVSS_BLOCKING\t(1 << 1)\n-#define\tVMCS_INTERRUPTIBILITY_SMI_BLOCKING\t(1 << 2)\n-#define\tVMCS_INTERRUPTIBILITY_NMI_BLOCKING\t(1 << 3)\n+#define VMCS_INTERRUPTIBILITY_STI_BLOCKING (1 << 0)\n+#define VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING (1 << 1)\n+#define VMCS_INTERRUPTIBILITY_SMI_BLOCKING (1 << 2)\n+#define VMCS_INTERRUPTIBILITY_NMI_BLOCKING (1 << 3)\n \n /*\n * Exit qualification for EXIT_REASON_INVAL_VMCS\n */\n-#define\tEXIT_QUAL_NMI_WHILE_STI_BLOCKING\t3\n+#define EXIT_QUAL_NMI_WHILE_STI_BLOCKING 3\n \n /*\n * Exit qualification for EPT violation\n */\n-#define\tEPT_VIOLATION_DATA_READ\t\t(1UL << 0)\n-#define\tEPT_VIOLATION_DATA_WRITE\t(1UL << 1)\n-#define\tEPT_VIOLATION_INST_FETCH\t(1UL << 2)\n-#define\tEPT_VIOLATION_GPA_READABLE\t(1UL << 3)\n-#define\tEPT_VIOLATION_GPA_WRITEABLE\t(1UL << 4)\n-#define\tEPT_VIOLATION_GPA_EXECUTABLE\t(1UL << 5)\n-#define\tEPT_VIOLATION_GLA_VALID\t\t(1UL << 7)\n-#define\tEPT_VIOLATION_XLAT_VALID\t(1UL << 8)\n+#define EPT_VIOLATION_DATA_READ (1UL << 0)\n+#define EPT_VIOLATION_DATA_WRITE (1UL << 1)\n+#define EPT_VIOLATION_INST_FETCH (1UL << 2)\n+#define EPT_VIOLATION_GPA_READABLE (1UL << 3)\n+#define EPT_VIOLATION_GPA_WRITEABLE (1UL << 4)\n+#define EPT_VIOLATION_GPA_EXECUTABLE (1UL << 5)\n+#define EPT_VIOLATION_GLA_VALID (1UL << 7)\n+#define EPT_VIOLATION_XLAT_VALID (1UL << 8)\n \n /*\n * Exit qualification for APIC-access VM exit\n */\n-#define\tAPIC_ACCESS_OFFSET(qual)\t((qual) & 
0xFFF)\n-#define\tAPIC_ACCESS_TYPE(qual)\t\t(((qual) >> 12) & 0xF)\n+#define APIC_ACCESS_OFFSET(qual) ((qual) & 0xFFF)\n+#define APIC_ACCESS_TYPE(qual) (((qual) >> 12) & 0xF)\n \n /*\n * Exit qualification for APIC-write VM exit\n */\n-#define\tAPIC_WRITE_OFFSET(qual)\t\t((qual) & 0xFFF)\n+#define APIC_WRITE_OFFSET(qual) ((qual) & 0xFFF)\n \n \n-#define VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING (1 << 2)\n-#define VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET (1 << 3)\n-#define VMCS_PRI_PROC_BASED_CTLS_HLT (1 << 7)\n+#define VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING (1 << 2)\n+#define VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET (1 << 3)\n+#define VMCS_PRI_PROC_BASED_CTLS_HLT (1 << 7)\n #define VMCS_PRI_PROC_BASED_CTLS_MWAIT (1 << 10)\n #define VMCS_PRI_PROC_BASED_CTLS_TSC (1 << 12)\n #define VMCS_PRI_PROC_BASED_CTLS_CR8_LOAD (1 << 19)\n@@ -359,10 +359,10 @@\n #define VMCS_PRI_PROC_BASED2_CTLS_X2APIC (1 << 4)\n \n enum task_switch_reason {\n-\tTSR_CALL,\n-\tTSR_IRET,\n+ TSR_CALL,\n+ TSR_IRET,\n TSR_JMP,\n-\tTSR_IDT_GATE,\t/* task gate in IDT */\n+ TSR_IDT_GATE, /* task gate in IDT */\n };\n \n #endif\ndiff --git a/target/i386/hvf-utils/vmx.h b/target/i386/hvf-utils/vmx.h\nindex 8a080e6777..d086c8d253 100644\n--- a/target/i386/hvf-utils/vmx.h\n+++ b/target/i386/hvf-utils/vmx.h\n@@ -31,45 +31,45 @@\n \n #include \"exec/address-spaces.h\"\n \n-static uint64_t inline rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)\n+static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)\n {\n-\tuint64_t v;\n+ uint64_t v;\n \n-\tif (hv_vcpu_read_register(vcpu, reg, &v)) {\n-\t\tabort();\n-\t}\n+ if (hv_vcpu_read_register(vcpu, reg, &v)) {\n+ abort();\n+ }\n \n-\treturn v;\n+ return v;\n }\n \n /* write GPR */\n-static void inline wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)\n+static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)\n {\n-\tif (hv_vcpu_write_register(vcpu, reg, v)) {\n-\t\tabort();\n-\t}\n+ if (hv_vcpu_write_register(vcpu, reg, v)) {\n+ abort();\n+ }\n }\n \n /* read VMCS field */\n-static uint64_t inline rvmcs(hv_vcpuid_t vcpu, uint32_t field)\n+static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)\n {\n-\tuint64_t v;\n+ uint64_t v;\n \n-\thv_vmx_vcpu_read_vmcs(vcpu, field, &v);\n+ hv_vmx_vcpu_read_vmcs(vcpu, field, &v);\n \n-\treturn v;\n+ return v;\n }\n \n /* write VMCS field */\n-static void inline wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)\n+static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)\n {\n-\thv_vmx_vcpu_write_vmcs(vcpu, field, v);\n+ hv_vmx_vcpu_write_vmcs(vcpu, field, v);\n }\n \n /* desired control word constrained by hardware/hypervisor capabilities */\n-static uint64_t inline cap2ctrl(uint64_t cap, uint64_t ctrl)\n+static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)\n {\n-\treturn (ctrl | (cap & 0xffffffff)) & (cap >> 32);\n+ return (ctrl | (cap & 0xffffffff)) & (cap >> 32);\n }\n \n #define VM_ENTRY_GUEST_LMA (1LL << 9)\n@@ -91,11 +91,14 @@ static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)\n efer |= EFER_LMA;\n wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);\n entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);\n- wvmcs(vcpu, VMCS_ENTRY_CTLS, rvmcs(vcpu, VMCS_ENTRY_CTLS) | VM_ENTRY_GUEST_LMA);\n+ wvmcs(vcpu, VMCS_ENTRY_CTLS, rvmcs(vcpu, VMCS_ENTRY_CTLS) |\n+ VM_ENTRY_GUEST_LMA);\n \n uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);\n- if ((efer & EFER_LME) && (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {\n- wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS, (guest_tr_ar & ~AR_TYPE_MASK) | 
AR_TYPE_BUSY_64_TSS);\n+ if ((efer & EFER_LME) &&\n+ (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {\n+ wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,\n+ (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);\n }\n }\n \n@@ -110,39 +113,45 @@ static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)\n wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);\n }\n \n-static void inline macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)\n+static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)\n {\n int i;\n uint64_t pdpte[4] = {0, 0, 0, 0};\n uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);\n uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);\n \n- if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) && !(efer & EFER_LME))\n- address_space_rw(&address_space_memory, rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,\n+ if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&\n+ !(efer & EFER_LME)) {\n+ address_space_rw(&address_space_memory,\n+ rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,\n MEMTXATTRS_UNSPECIFIED,\n (uint8_t *)pdpte, 32, 0);\n+ }\n \n- for (i = 0; i < 4; i++)\n+ for (i = 0; i < 4; i++) {\n wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);\n+ }\n \n wvmcs(vcpu, VMCS_CR0_MASK, CR0_CD | CR0_NE | CR0_PG);\n wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);\n \n cr0 &= ~CR0_CD;\n- wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE| CR0_ET);\n+ wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);\n \n if (efer & EFER_LME) {\n- if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG))\n- enter_long_mode(vcpu, cr0, efer);\n- if (/*(old_cr0 & CR0_PG) &&*/ !(cr0 & CR0_PG))\n+ if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {\n+ enter_long_mode(vcpu, cr0, efer);\n+ }\n+ if (/*(old_cr0 & CR0_PG) &&*/ !(cr0 & CR0_PG)) {\n exit_long_mode(vcpu, cr0, efer);\n+ }\n }\n \n hv_vcpu_invalidate_tlb(vcpu);\n hv_vcpu_flush(vcpu);\n }\n \n-static void inline macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)\n+static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)\n {\n uint64_t guest_cr4 = cr4 | CR4_VMXE;\n \n@@ -153,7 +162,7 @@ static void inline macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)\n hv_vcpu_flush(vcpu);\n }\n \n-static void inline macvm_set_rip(CPUState *cpu, uint64_t rip)\n+static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)\n {\n uint64_t val;\n \n@@ -162,39 +171,44 @@ static void inline macvm_set_rip(CPUState *cpu, uint64_t rip)\n \n /* after moving forward in rip, we need to clean INTERRUPTABILITY */\n val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);\n- if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING))\n+ if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |\n+ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {\n wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,\n- val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));\n+ val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |\n+ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));\n+ }\n }\n \n-static void inline vmx_clear_nmi_blocking(CPUState *cpu)\n+static inline void vmx_clear_nmi_blocking(CPUState *cpu)\n {\n uint32_t gi = (uint32_t) rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);\n gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;\n wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);\n }\n \n-static void inline vmx_set_nmi_blocking(CPUState *cpu)\n+static inline void vmx_set_nmi_blocking(CPUState *cpu)\n {\n uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);\n gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;\n wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);\n }\n \n-static void inline 
vmx_set_nmi_window_exiting(CPUState *cpu)\n+static inline void vmx_set_nmi_window_exiting(CPUState *cpu)\n {\n uint64_t val;\n val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);\n- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val | VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);\n+ wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |\n+ VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);\n \n }\n \n-static void inline vmx_clear_nmi_window_exiting(CPUState *cpu)\n+static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)\n {\n \n uint64_t val;\n val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);\n- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val & ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);\n+ wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &\n+ ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);\n }\n \n #endif\ndiff --git a/target/i386/hvf-utils/x86.c b/target/i386/hvf-utils/x86.c\nindex e3db2c9c8b..07eb5a8586 100644\n--- a/target/i386/hvf-utils/x86.c\n+++ b/target/i386/hvf-utils/x86.c\n@@ -26,35 +26,38 @@\n #include \"x86_mmu.h\"\n #include \"x86_descr.h\"\n \n-static uint32_t x86_segment_access_rights(struct x86_segment_descriptor *var)\n+/* static uint32_t x86_segment_access_rights(struct x86_segment_descriptor *var)\n {\n- uint32_t ar;\n-\n- if (!var->p) {\n- ar = 1 << 16;\n- return ar;\n- }\n-\n- ar = var->type & 15;\n- ar |= (var->s & 1) << 4;\n- ar |= (var->dpl & 3) << 5;\n- ar |= (var->p & 1) << 7;\n- ar |= (var->avl & 1) << 12;\n- ar |= (var->l & 1) << 13;\n- ar |= (var->db & 1) << 14;\n- ar |= (var->g & 1) << 15;\n- return ar;\n-}\n-\n-bool x86_read_segment_descriptor(struct CPUState *cpu, struct x86_segment_descriptor *desc, x68_segment_selector sel)\n+ uint32_t ar;\n+\n+ if (!var->p) {\n+ ar = 1 << 16;\n+ return ar;\n+ }\n+\n+ ar = var->type & 15;\n+ ar |= (var->s & 1) << 4;\n+ ar |= (var->dpl & 3) << 5;\n+ ar |= (var->p & 1) << 7;\n+ ar |= (var->avl & 1) << 12;\n+ ar |= (var->l & 1) << 13;\n+ ar |= (var->db & 1) << 14;\n+ ar |= (var->g & 1) << 15;\n+ return ar;\n+}*/\n+\n+bool x86_read_segment_descriptor(struct CPUState *cpu,\n+ struct x86_segment_descriptor *desc,\n+ x68_segment_selector sel)\n {\n addr_t base;\n uint32_t limit;\n \n ZERO_INIT(*desc);\n- // valid gdt descriptors start from index 1\n- if (!sel.index && GDT_SEL == sel.ti)\n+ /* valid gdt descriptors start from index 1 */\n+ if (!sel.index && GDT_SEL == sel.ti) {\n return false;\n+ }\n \n if (GDT_SEL == sel.ti) {\n base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);\n@@ -64,18 +67,21 @@ bool x86_read_segment_descriptor(struct CPUState *cpu, struct x86_segment_descri\n limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);\n }\n \n- if (sel.index * 8 >= limit)\n+ if (sel.index * 8 >= limit) {\n return false;\n+ }\n \n vmx_read_mem(cpu, desc, base + sel.index * 8, sizeof(*desc));\n return true;\n }\n \n-bool x86_write_segment_descriptor(struct CPUState *cpu, struct x86_segment_descriptor *desc, x68_segment_selector sel)\n+bool x86_write_segment_descriptor(struct CPUState *cpu,\n+ struct x86_segment_descriptor *desc,\n+ x68_segment_selector sel)\n {\n addr_t base;\n uint32_t limit;\n- \n+\n if (GDT_SEL == sel.ti) {\n base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);\n limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);\n@@ -83,23 +89,24 @@ bool x86_write_segment_descriptor(struct CPUState *cpu, struct x86_segment_descr\n base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);\n limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);\n }\n- \n+\n if (sel.index * 8 >= limit) {\n- printf(\"%s: gdt limit\\n\", __FUNCTION__);\n+ printf(\"%s: 
gdt limit\\n\", __func__);\n return false;\n }\n vmx_write_mem(cpu, base + sel.index * 8, desc, sizeof(*desc));\n return true;\n }\n \n-bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc, int gate)\n+bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,\n+ int gate)\n {\n addr_t base = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE);\n uint32_t limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT);\n \n ZERO_INIT(*idt_desc);\n if (gate * 8 >= limit) {\n- printf(\"%s: idt limit\\n\", __FUNCTION__);\n+ printf(\"%s: idt limit\\n\", __func__);\n return false;\n }\n \n@@ -120,7 +127,7 @@ bool x86_is_real(struct CPUState *cpu)\n \n bool x86_is_v8086(struct CPUState *cpu)\n {\n- return (x86_is_protected(cpu) && (RFLAGS(cpu) & RFLAGS_VM));\n+ return x86_is_protected(cpu) && (RFLAGS(cpu) & RFLAGS_VM);\n }\n \n bool x86_is_long_mode(struct CPUState *cpu)\n@@ -153,17 +160,18 @@ addr_t linear_addr(struct CPUState *cpu, addr_t addr, x86_reg_segment seg)\n return vmx_read_segment_base(cpu, seg) + addr;\n }\n \n-addr_t linear_addr_size(struct CPUState *cpu, addr_t addr, int size, x86_reg_segment seg)\n+addr_t linear_addr_size(struct CPUState *cpu, addr_t addr, int size,\n+ x86_reg_segment seg)\n {\n switch (size) {\n- case 2:\n- addr = (uint16_t)addr;\n- break;\n- case 4:\n- addr = (uint32_t)addr;\n- break;\n- default:\n- break;\n+ case 2:\n+ addr = (uint16_t)addr;\n+ break;\n+ case 4:\n+ addr = (uint32_t)addr;\n+ break;\n+ default:\n+ break;\n }\n return linear_addr(cpu, addr, seg);\n }\ndiff --git a/target/i386/hvf-utils/x86.h b/target/i386/hvf-utils/x86.h\nindex 5dffdd6568..435b49ae04 100644\n--- a/target/i386/hvf-utils/x86.h\n+++ b/target/i386/hvf-utils/x86.h\n@@ -25,26 +25,26 @@\n #include \"qemu-common.h\"\n #include \"x86_flags.h\"\n \n-// exceptions\n+/* exceptions */\n typedef enum x86_exception {\n- EXCEPTION_DE, // divide error\n- EXCEPTION_DB, // debug fault\n- EXCEPTION_NMI, // non-maskable interrupt\n- EXCEPTION_BP, // breakpoint\ttrap\n- EXCEPTION_OF, // overflow\ttrap\n- EXCEPTION_BR, // boundary range exceeded\tfault\n- EXCEPTION_UD, // undefined opcode\n- EXCEPTION_NM, // device not available\n- EXCEPTION_DF, // double fault\n- EXCEPTION_RSVD, // not defined\n- EXCEPTION_TS, // invalid TSS\tfault\n- EXCEPTION_NP, // not present\tfault\n- EXCEPTION_GP, // general protection\tfault\n- EXCEPTION_PF, // page fault\n- EXCEPTION_RSVD2, // not defined\n+ EXCEPTION_DE, /* divide error */\n+ EXCEPTION_DB, /* debug fault */\n+ EXCEPTION_NMI, /* non-maskable interrupt */\n+ EXCEPTION_BP, /* breakpoint trap */\n+ EXCEPTION_OF, /* overflow trap */\n+ EXCEPTION_BR, /* boundary range exceeded fault */\n+ EXCEPTION_UD, /* undefined opcode */\n+ EXCEPTION_NM, /* device not available */\n+ EXCEPTION_DF, /* double fault */\n+ EXCEPTION_RSVD, /* not defined */\n+ EXCEPTION_TS, /* invalid TSS fault */\n+ EXCEPTION_NP, /* not present fault */\n+ EXCEPTION_GP, /* general protection fault */\n+ EXCEPTION_PF, /* page fault */\n+ EXCEPTION_RSVD2, /* not defined */\n } x86_exception;\n \n-// general purpose regs\n+/* general purpose regs */\n typedef enum x86_reg_name {\n REG_RAX = 0,\n REG_RCX = 1,\n@@ -64,7 +64,7 @@ typedef enum x86_reg_name {\n REG_R15 = 15,\n } x86_reg_name;\n \n-// segment regs\n+/* segment regs */\n typedef enum x86_reg_segment {\n REG_SEG_ES = 0,\n REG_SEG_CS = 1,\n@@ -76,24 +76,23 @@ typedef enum x86_reg_segment {\n REG_SEG_TR = 7,\n } x86_reg_segment;\n \n-typedef struct x86_register\n-{\n+typedef struct x86_register {\n union {\n struct {\n- 
uint64_t rrx; // full 64 bit\n+ uint64_t rrx; /* full 64 bit */\n };\n struct {\n- uint32_t erx; // low 32 bit part\n+ uint32_t erx; /* low 32 bit part */\n uint32_t hi32_unused1;\n };\n struct {\n- uint16_t rx; // low 16 bit part\n+ uint16_t rx; /* low 16 bit part */\n uint16_t hi16_unused1;\n uint32_t hi32_unused2;\n };\n struct {\n- uint8_t lx; // low 8 bit part\n- uint8_t hx; // high 8 bit\n+ uint8_t lx; /* low 8 bit part */\n+ uint8_t hx; /* high 8 bit */\n uint16_t hi16_unused2;\n uint32_t hi32_unused3;\n };\n@@ -120,7 +119,7 @@ typedef enum x86_rflags {\n RFLAGS_ID = (1L << 21),\n } x86_rflags;\n \n-// rflags register\n+/* rflags register */\n typedef struct x86_reg_flags {\n union {\n struct {\n@@ -205,7 +204,7 @@ typedef enum x86_reg_cr4 {\n CR4_SMEP = (1L << 20),\n } x86_reg_cr4;\n \n-// 16 bit Task State Segment\n+/* 16 bit Task State Segment */\n typedef struct x86_tss_segment16 {\n uint16_t link;\n uint16_t sp0;\n@@ -231,9 +230,8 @@ typedef struct x86_tss_segment16 {\n uint16_t ldtr;\n } __attribute__((packed)) x86_tss_segment16;\n \n-// 32 bit Task State Segment\n-typedef struct x86_tss_segment32\n-{\n+/* 32 bit Task State Segment */\n+typedef struct x86_tss_segment32 {\n uint32_t prev_tss;\n uint32_t esp0;\n uint32_t ss0;\n@@ -263,9 +261,8 @@ typedef struct x86_tss_segment32\n uint16_t iomap_base;\n } __attribute__ ((__packed__)) x86_tss_segment32;\n \n-// 64 bit Task State Segment\n-typedef struct x86_tss_segment64\n-{\n+/* 64 bit Task State Segment */\n+typedef struct x86_tss_segment64 {\n uint32_t unused;\n uint64_t rsp0;\n uint64_t rsp1;\n@@ -283,7 +280,7 @@ typedef struct x86_tss_segment64\n uint16_t iomap_base;\n } __attribute__ ((__packed__)) x86_tss_segment64;\n \n-// segment descriptors\n+/* segment descriptors */\n typedef struct x86_segment_descriptor {\n uint64_t limit0:16;\n uint64_t base0:16;\n@@ -305,7 +302,8 @@ static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)\n return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);\n }\n \n-static inline void x86_set_segment_base(x86_segment_descriptor *desc, uint32_t base)\n+static inline void x86_set_segment_base(x86_segment_descriptor *desc,\n+ uint32_t base)\n {\n desc->base2 = base >> 24;\n desc->base1 = (base >> 16) & 0xff;\n@@ -315,12 +313,14 @@ static inline void x86_set_segment_base(x86_segment_descriptor *desc, uint32_t b\n static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)\n {\n uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);\n- if (desc->g)\n+ if (desc->g) {\n return (limit << 12) | 0xfff;\n+ }\n return limit;\n }\n \n-static inline void x86_set_segment_limit(x86_segment_descriptor *desc, uint32_t limit)\n+static inline void x86_set_segment_limit(x86_segment_descriptor *desc,\n+ uint32_t limit)\n {\n desc->limit0 = limit & 0xffff;\n desc->limit1 = limit >> 16;\n@@ -356,11 +356,11 @@ typedef struct x68_segment_selector {\n };\n } __attribute__ ((__packed__)) x68_segment_selector;\n \n-// Definition of hvf_x86_state is here\n+/* Definition of hvf_x86_state is here */\n struct hvf_x86_state {\n int hlt;\n uint64_t init_tsc;\n- \n+\n int interruptable;\n uint64_t exp_rip;\n uint64_t fetch_rip;\n@@ -370,7 +370,7 @@ struct hvf_x86_state {\n struct lazy_flags lflags;\n struct x86_efer efer;\n uint8_t mmio_buf[4096];\n- uint8_t* apic_page;\n+ uint8_t *apic_page;\n };\n \n /*\n@@ -380,7 +380,7 @@ struct hvf_xsave_buf {\n uint32_t data[1024];\n };\n \n-// useful register access macros\n+/* useful register access macros */\n #define RIP(cpu) 
(cpu->hvf_x86->rip)\n #define EIP(cpu) ((uint32_t)cpu->hvf_x86->rip)\n #define RFLAGS(cpu) (cpu->hvf_x86->rflags.rflags)\n@@ -436,13 +436,18 @@ struct hvf_xsave_buf {\n #define DH(cpu) RH(cpu, REG_RDX)\n #define BH(cpu) RH(cpu, REG_RBX)\n \n-// deal with GDT/LDT descriptors in memory\n-bool x86_read_segment_descriptor(struct CPUState *cpu, struct x86_segment_descriptor *desc, x68_segment_selector sel);\n-bool x86_write_segment_descriptor(struct CPUState *cpu, struct x86_segment_descriptor *desc, x68_segment_selector sel);\n+/* deal with GDT/LDT descriptors in memory */\n+bool x86_read_segment_descriptor(struct CPUState *cpu,\n+ struct x86_segment_descriptor *desc,\n+ x68_segment_selector sel);\n+bool x86_write_segment_descriptor(struct CPUState *cpu,\n+ struct x86_segment_descriptor *desc,\n+ x68_segment_selector sel);\n \n-bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc, int gate);\n+bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,\n+ int gate);\n \n-// helpers\n+/* helpers */\n bool x86_is_protected(struct CPUState *cpu);\n bool x86_is_real(struct CPUState *cpu);\n bool x86_is_v8086(struct CPUState *cpu);\n@@ -452,19 +457,20 @@ bool x86_is_paging_mode(struct CPUState *cpu);\n bool x86_is_pae_enabled(struct CPUState *cpu);\n \n addr_t linear_addr(struct CPUState *cpu, addr_t addr, x86_reg_segment seg);\n-addr_t linear_addr_size(struct CPUState *cpu, addr_t addr, int size, x86_reg_segment seg);\n+addr_t linear_addr_size(struct CPUState *cpu, addr_t addr, int size,\n+ x86_reg_segment seg);\n addr_t linear_rip(struct CPUState *cpu, addr_t rip);\n \n static inline uint64_t rdtscp(void)\n {\n uint64_t tsc;\n- __asm__ __volatile__(\"rdtscp; \" // serializing read of tsc\n- \"shl $32,%%rdx; \" // shift higher 32 bits stored in rdx up\n- \"or %%rdx,%%rax\" // and or onto rax\n- : \"=a\"(tsc) // output to tsc variable\n+ __asm__ __volatile__(\"rdtscp; \" /* serializing read of tsc */\n+ \"shl $32,%%rdx; \" /* shift higher 32 bits stored in rdx up */\n+ \"or %%rdx,%%rax\" /* and or onto rax */\n+ : \"=a\"(tsc) /* output to tsc variable */\n :\n- : \"%rcx\", \"%rdx\"); // rcx and rdx are clobbered\n- \n+ : \"%rcx\", \"%rdx\"); /* rcx and rdx are clobbered */\n+\n return tsc;\n }\n \ndiff --git a/target/i386/hvf-utils/x86_cpuid.c b/target/i386/hvf-utils/x86_cpuid.c\nindex e496cf001c..5d63bca8fd 100644\n--- a/target/i386/hvf-utils/x86_cpuid.c\n+++ b/target/i386/hvf-utils/x86_cpuid.c\n@@ -41,10 +41,10 @@ struct x86_cpuid builtin_cpus[] = {\n .model = 3,\n .stepping = 3,\n .features = PPRO_FEATURES,\n- .ext_features = /*CPUID_EXT_SSE3 |*/ CPUID_EXT_POPCNT, CPUID_MTRR | CPUID_CLFLUSH,\n- CPUID_PSE36,\n+ .ext_features = /*CPUID_EXT_SSE3 |*/ CPUID_EXT_POPCNT, CPUID_MTRR |\n+ CPUID_CLFLUSH, CPUID_PSE36,\n .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,\n- .ext3_features = 0,//CPUID_EXT3_LAHF_LM,\n+ .ext3_features = 0, /* CPUID_EXT3_LAHF_LM, */\n .xlevel = 0x80000004,\n .model_id = \"vmx32\",\n },\n@@ -92,14 +92,15 @@ struct x86_cpuid builtin_cpus[] = {\n },\n };\n \n-static struct x86_cpuid *_cpuid = NULL;\n+static struct x86_cpuid *_cpuid;\n \n-void init_cpuid(struct CPUState* cpu)\n+void init_cpuid(struct CPUState *cpu)\n {\n- _cpuid = &builtin_cpus[2]; // core2duo\n+ _cpuid = &builtin_cpus[2]; /* core2duo */\n }\n \n-void get_cpuid_func(struct CPUState* cpu, int func, int cnt, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)\n+void get_cpuid_func(struct CPUState *cpu, int func, int cnt, uint32_t *eax,\n+ uint32_t 
*ebx, uint32_t *ecx, uint32_t *edx)\n {\n uint32_t h_rax, h_rbx, h_rcx, h_rdx;\n host_cpuid(func, cnt, &h_rax, &h_rbx, &h_rcx, &h_rdx);\n@@ -107,164 +108,172 @@ void get_cpuid_func(struct CPUState* cpu, int func, int cnt, uint32_t *eax, uint\n \n \n *eax = *ebx = *ecx = *edx = 0;\n- switch(func) {\n- case 0:\n- *eax = _cpuid->level;\n- *ebx = _cpuid->vendor1;\n- *edx = _cpuid->vendor2;\n- *ecx = _cpuid->vendor3;\n- break;\n- case 1:\n- *eax = h_rax;//_cpuid->stepping | (_cpuid->model << 3) | (_cpuid->family << 6);\n- *ebx = (apic_id << 24) | (h_rbx & 0x00ffffff);\n- *ecx = h_rcx;\n- *edx = h_rdx;\n+ switch (func) {\n+ case 0:\n+ *eax = _cpuid->level;\n+ *ebx = _cpuid->vendor1;\n+ *edx = _cpuid->vendor2;\n+ *ecx = _cpuid->vendor3;\n+ break;\n+ case 1:\n+ *eax = h_rax;/*_cpuid->stepping | (_cpuid->model << 3) |\n+ (_cpuid->family << 6); */\n+ *ebx = (apic_id << 24) | (h_rbx & 0x00ffffff);\n+ *ecx = h_rcx;\n+ *edx = h_rdx;\n \n- if (cpu->nr_cores * cpu->nr_threads > 1) {\n- *ebx |= (cpu->nr_cores * cpu->nr_threads) << 16;\n- *edx |= 1 << 28; /* Enable Hyper-Threading */\n- }\n+ if (cpu->nr_cores * cpu->nr_threads > 1) {\n+ *ebx |= (cpu->nr_cores * cpu->nr_threads) << 16;\n+ *edx |= 1 << 28; /* Enable Hyper-Threading */\n+ }\n \n- *ecx = *ecx & ~(CPUID_EXT_OSXSAVE | CPUID_EXT_MONITOR | CPUID_EXT_X2APIC |\n- CPUID_EXT_VMX | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_TM2 | CPUID_EXT_PCID |\n- CPUID_EXT_EST | CPUID_EXT_SSE42 | CPUID_EXT_SSE41);\n- *ecx |= CPUID_EXT_HYPERVISOR;\n- break;\n- case 2:\n- /* cache info: needed for Pentium Pro compatibility */\n- *eax = h_rax;\n- *ebx = h_rbx;\n- *ecx = h_rcx;\n- *edx = h_rdx;\n- break;\n- case 4:\n- /* cache info: needed for Core compatibility */\n- *eax = h_rax;\n- *ebx = h_rbx;\n- *ecx = h_rcx;\n- *edx = h_rdx;\n- break;\n- case 5:\n- /* mwait info: needed for Core compatibility */\n- *eax = h_rax;\n- *ebx = h_rbx;\n- *ecx = h_rcx;\n- *edx = h_rdx;\n- break;\n- case 6:\n- /* Thermal and Power Leaf */\n- *eax = 0;\n- *ebx = 0;\n- *ecx = 0;\n- *edx = 0;\n- break;\n- case 7:\n- *eax = h_rax;\n- *ebx = h_rbx & ~(CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512PF | CPUID_7_0_EBX_AVX512ER | CPUID_7_0_EBX_AVX512CD |\n- CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_INVPCID);\n- *ecx = h_rcx & ~(CPUID_7_0_ECX_AVX512BMI);\n- *edx = h_rdx;\n- break;\n- case 9:\n- /* Direct Cache Access Information Leaf */\n- *eax = h_rax;\n- *ebx = h_rbx;\n- *ecx = h_rcx;\n- *edx = h_rdx;\n- break;\n- case 0xA:\n- /* Architectural Performance Monitoring Leaf */\n- *eax = 0;\n- *ebx = 0;\n- *ecx = 0;\n- *edx = 0;\n- break;\n- case 0xB:\n- /* CPU Topology Leaf */\n- *eax = 0;\n- *ebx = 0; /* Means that we don't support this leaf */\n- *ecx = 0;\n- *edx = 0;\n- break;\n- case 0xD:\n- *eax = h_rax;\n- if (!cnt)\n- *eax &= (XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK);\n- if (1 == cnt)\n- *eax &= (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC);\n- *ebx = h_rbx;\n- *ecx = h_rcx;\n- *edx = h_rdx;\n- break;\n- case 0x80000000:\n- *eax = _cpuid->xlevel;\n- *ebx = _cpuid->vendor1;\n- *edx = _cpuid->vendor2;\n- *ecx = _cpuid->vendor3;\n- break;\n- case 0x80000001:\n- *eax = h_rax;//_cpuid->stepping | (_cpuid->model << 3) | (_cpuid->family << 6);\n- *ebx = 0;\n- *ecx = _cpuid->ext3_features & h_rcx;\n- *edx = _cpuid->ext2_features & h_rdx;\n- break;\n- case 0x80000002:\n- case 0x80000003:\n- case 0x80000004:\n- *eax = h_rax;\n- *ebx = h_rbx;\n- *ecx = h_rcx;\n- *edx = h_rdx;\n- break;\n- case 0x80000005:\n- /* cache info (L1 cache) */\n- *eax = h_rax;\n- 
*ebx = h_rbx;\n- *ecx = h_rcx;\n- *edx = h_rdx;\n- break;\n- case 0x80000006:\n- /* cache info (L2 cache) */\n- *eax = h_rax;\n- *ebx = h_rbx;\n- *ecx = h_rcx;\n- *edx = h_rdx;\n- break;\n- case 0x80000007:\n- *eax = 0;\n- *ebx = 0;\n- *ecx = 0;\n- *edx = 0; /* Note - We disable invariant TSC (bit 8) in purpose */\n- break;\n- case 0x80000008:\n- /* virtual & phys address size in low 2 bytes. */\n- *eax = h_rax;\n- *ebx = 0;\n- *ecx = 0;\n- *edx = 0;\n- break;\n- case 0x8000000A:\n- *eax = 0;\n- *ebx = 0;\n- *ecx = 0;\n- *edx = 0;\n- break;\n- case 0x80000019:\n- *eax = h_rax;\n- *ebx = h_rbx;\n- *ecx = 0;\n- *edx = 0;\n- case 0xC0000000:\n- *eax = _cpuid->xlevel2;\n- *ebx = 0;\n- *ecx = 0;\n- *edx = 0;\n- break;\n- default:\n- *eax = 0;\n- *ebx = 0;\n- *ecx = 0;\n- *edx = 0;\n- break;\n+ *ecx = *ecx & ~(CPUID_EXT_OSXSAVE | CPUID_EXT_MONITOR |\n+ CPUID_EXT_X2APIC | CPUID_EXT_VMX |\n+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_TM2 |\n+ CPUID_EXT_PCID | CPUID_EXT_EST | CPUID_EXT_SSE42 |\n+ CPUID_EXT_SSE41);\n+ *ecx |= CPUID_EXT_HYPERVISOR;\n+ break;\n+ case 2:\n+ /* cache info: needed for Pentium Pro compatibility */\n+ *eax = h_rax;\n+ *ebx = h_rbx;\n+ *ecx = h_rcx;\n+ *edx = h_rdx;\n+ break;\n+ case 4:\n+ /* cache info: needed for Core compatibility */\n+ *eax = h_rax;\n+ *ebx = h_rbx;\n+ *ecx = h_rcx;\n+ *edx = h_rdx;\n+ break;\n+ case 5:\n+ /* mwait info: needed for Core compatibility */\n+ *eax = h_rax;\n+ *ebx = h_rbx;\n+ *ecx = h_rcx;\n+ *edx = h_rdx;\n+ break;\n+ case 6:\n+ /* Thermal and Power Leaf */\n+ *eax = 0;\n+ *ebx = 0;\n+ *ecx = 0;\n+ *edx = 0;\n+ break;\n+ case 7:\n+ *eax = h_rax;\n+ *ebx = h_rbx & ~(CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512PF |\n+ CPUID_7_0_EBX_AVX512ER | CPUID_7_0_EBX_AVX512CD |\n+ CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL |\n+ CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_INVPCID);\n+ *ecx = h_rcx & ~(CPUID_7_0_ECX_AVX512BMI);\n+ *edx = h_rdx;\n+ break;\n+ case 9:\n+ /* Direct Cache Access Information Leaf */\n+ *eax = h_rax;\n+ *ebx = h_rbx;\n+ *ecx = h_rcx;\n+ *edx = h_rdx;\n+ break;\n+ case 0xA:\n+ /* Architectural Performance Monitoring Leaf */\n+ *eax = 0;\n+ *ebx = 0;\n+ *ecx = 0;\n+ *edx = 0;\n+ break;\n+ case 0xB:\n+ /* CPU Topology Leaf */\n+ *eax = 0;\n+ *ebx = 0; /* Means that we don't support this leaf */\n+ *ecx = 0;\n+ *edx = 0;\n+ break;\n+ case 0xD:\n+ *eax = h_rax;\n+ if (!cnt) {\n+ *eax &= (XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK);\n+ }\n+ if (1 == cnt) {\n+ *eax &= (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC);\n+ }\n+ *ebx = h_rbx;\n+ *ecx = h_rcx;\n+ *edx = h_rdx;\n+ break;\n+ case 0x80000000:\n+ *eax = _cpuid->xlevel;\n+ *ebx = _cpuid->vendor1;\n+ *edx = _cpuid->vendor2;\n+ *ecx = _cpuid->vendor3;\n+ break;\n+ case 0x80000001:\n+ *eax = h_rax;/*_cpuid->stepping | (_cpuid->model << 3) |\n+ (_cpuid->family << 6);*/\n+ *ebx = 0;\n+ *ecx = _cpuid->ext3_features & h_rcx;\n+ *edx = _cpuid->ext2_features & h_rdx;\n+ break;\n+ case 0x80000002:\n+ case 0x80000003:\n+ case 0x80000004:\n+ *eax = h_rax;\n+ *ebx = h_rbx;\n+ *ecx = h_rcx;\n+ *edx = h_rdx;\n+ break;\n+ case 0x80000005:\n+ /* cache info (L1 cache) */\n+ *eax = h_rax;\n+ *ebx = h_rbx;\n+ *ecx = h_rcx;\n+ *edx = h_rdx;\n+ break;\n+ case 0x80000006:\n+ /* cache info (L2 cache) */\n+ *eax = h_rax;\n+ *ebx = h_rbx;\n+ *ecx = h_rcx;\n+ *edx = h_rdx;\n+ break;\n+ case 0x80000007:\n+ *eax = 0;\n+ *ebx = 0;\n+ *ecx = 0;\n+ *edx = 0; /* Note - We disable invariant TSC (bit 8) in purpose */\n+ break;\n+ case 0x80000008:\n+ /* virtual & phys address size in low 2 bytes. 
*/\n+ *eax = h_rax;\n+ *ebx = 0;\n+ *ecx = 0;\n+ *edx = 0;\n+ break;\n+ case 0x8000000A:\n+ *eax = 0;\n+ *ebx = 0;\n+ *ecx = 0;\n+ *edx = 0;\n+ break;\n+ case 0x80000019:\n+ *eax = h_rax;\n+ *ebx = h_rbx;\n+ *ecx = 0;\n+ *edx = 0;\n+ case 0xC0000000:\n+ *eax = _cpuid->xlevel2;\n+ *ebx = 0;\n+ *ecx = 0;\n+ *edx = 0;\n+ break;\n+ default:\n+ *eax = 0;\n+ *ebx = 0;\n+ *ecx = 0;\n+ *edx = 0;\n+ break;\n }\n }\ndiff --git a/target/i386/hvf-utils/x86_cpuid.h b/target/i386/hvf-utils/x86_cpuid.h\nindex 02f2f115b0..ab10b84b61 100644\n--- a/target/i386/hvf-utils/x86_cpuid.h\n+++ b/target/i386/hvf-utils/x86_cpuid.h\n@@ -35,7 +35,7 @@ struct x86_cpuid {\n uint32_t features, ext_features, ext2_features, ext3_features;\n uint32_t kvm_features, svm_features;\n uint32_t xlevel;\n- char model_id[48];\n+ char model_id[50];\n int vendor_override;\n uint32_t flags;\n uint32_t xlevel2;\n@@ -44,8 +44,9 @@ struct x86_cpuid {\n \n struct CPUState;\n \n-void init_cpuid(struct CPUState* cpu);\n-void get_cpuid_func(struct CPUState *cpu, int func, int cnt, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);\n+void init_cpuid(struct CPUState *cpu);\n+void get_cpuid_func(struct CPUState *cpu, int func, int cnt, uint32_t *eax,\n+ uint32_t *ebx, uint32_t *ecx, uint32_t *edx);\n \n #endif /* __CPUID_H__ */\n \ndiff --git a/target/i386/hvf-utils/x86_decode.c b/target/i386/hvf-utils/x86_decode.c\nindex b4d8e22449..4faf82f721 100644\n--- a/target/i386/hvf-utils/x86_decode.c\n+++ b/target/i386/hvf-utils/x86_decode.c\n@@ -29,9 +29,11 @@\n \n static void decode_invalid(CPUState *cpu, struct x86_decode *decode)\n {\n- printf(\"%llx: failed to decode instruction \", cpu->hvf_x86->fetch_rip - decode->len);\n- for (int i = 0; i < decode->opcode_len; i++)\n+ printf(\"%llx: failed to decode instruction \", cpu->hvf_x86->fetch_rip -\n+ decode->len);\n+ for (int i = 0; i < decode->opcode_len; i++) {\n printf(\"%x \", decode->opcode[i]);\n+ }\n printf(\"\\n\");\n VM_PANIC(\"decoder failed\\n\");\n }\n@@ -39,43 +41,44 @@ static void decode_invalid(CPUState *cpu, struct x86_decode *decode)\n uint64_t sign(uint64_t val, int size)\n {\n switch (size) {\n- case 1:\n- val = (int8_t)val;\n- break;\n- case 2:\n- val = (int16_t)val;\n- break;\n- case 4:\n- val = (int32_t)val;\n- break;\n- case 8:\n- val = (int64_t)val;\n- break;\n- default:\n- VM_PANIC_EX(\"%s invalid size %d\\n\", __FUNCTION__, size);\n- break;\n+ case 1:\n+ val = (int8_t)val;\n+ break;\n+ case 2:\n+ val = (int16_t)val;\n+ break;\n+ case 4:\n+ val = (int32_t)val;\n+ break;\n+ case 8:\n+ val = (int64_t)val;\n+ break;\n+ default:\n+ VM_PANIC_EX(\"%s invalid size %d\\n\", __func__, size);\n+ break;\n }\n return val;\n }\n \n-static inline uint64_t decode_bytes(CPUState *cpu, struct x86_decode *decode, int size)\n+static inline uint64_t decode_bytes(CPUState *cpu, struct x86_decode *decode,\n+ int size)\n {\n addr_t val = 0;\n- \n+\n switch (size) {\n- case 1:\n- case 2:\n- case 4:\n- case 8:\n- break;\n- default:\n- VM_PANIC_EX(\"%s invalid size %d\\n\", __FUNCTION__, size);\n- break;\n+ case 1:\n+ case 2:\n+ case 4:\n+ case 8:\n+ break;\n+ default:\n+ VM_PANIC_EX(\"%s invalid size %d\\n\", __func__, size);\n+ break;\n }\n addr_t va = linear_rip(cpu, RIP(cpu)) + decode->len;\n vmx_read_mem(cpu, &val, va, size);\n decode->len += size;\n- \n+\n return val;\n }\n \n@@ -99,68 +102,76 @@ static inline uint64_t decode_qword(CPUState *cpu, struct x86_decode *decode)\n return decode_bytes(cpu, decode, 8);\n }\n \n-static void decode_modrm_rm(CPUState *cpu, struct x86_decode 
*decode, struct x86_decode_op *op)\n+static void decode_modrm_rm(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n op->type = X86_VAR_RM;\n }\n \n-static void decode_modrm_reg(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_modrm_reg(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n op->type = X86_VAR_REG;\n op->reg = decode->modrm.reg;\n op->ptr = get_reg_ref(cpu, op->reg, decode->rex.r, decode->operand_size);\n }\n \n-static void decode_rax(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_rax(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n op->type = X86_VAR_REG;\n op->reg = REG_RAX;\n op->ptr = get_reg_ref(cpu, op->reg, 0, decode->operand_size);\n }\n \n-static inline void decode_immediate(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *var, int size)\n+static inline void decode_immediate(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *var, int size)\n {\n var->type = X86_VAR_IMMEDIATE;\n var->size = size;\n switch (size) {\n- case 1:\n- var->val = decode_byte(cpu, decode);\n- break;\n- case 2:\n- var->val = decode_word(cpu, decode);\n- break;\n- case 4:\n- var->val = decode_dword(cpu, decode);\n- break;\n- case 8:\n- var->val = decode_qword(cpu, decode);\n- break;\n- default:\n- VM_PANIC_EX(\"bad size %d\\n\", size);\n+ case 1:\n+ var->val = decode_byte(cpu, decode);\n+ break;\n+ case 2:\n+ var->val = decode_word(cpu, decode);\n+ break;\n+ case 4:\n+ var->val = decode_dword(cpu, decode);\n+ break;\n+ case 8:\n+ var->val = decode_qword(cpu, decode);\n+ break;\n+ default:\n+ VM_PANIC_EX(\"bad size %d\\n\", size);\n }\n }\n \n-static void decode_imm8(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_imm8(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n decode_immediate(cpu, decode, op, 1);\n op->type = X86_VAR_IMMEDIATE;\n }\n \n-static void decode_imm8_signed(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_imm8_signed(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n decode_immediate(cpu, decode, op, 1);\n op->val = sign(op->val, 1);\n op->type = X86_VAR_IMMEDIATE;\n }\n \n-static void decode_imm16(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_imm16(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n decode_immediate(cpu, decode, op, 2);\n op->type = X86_VAR_IMMEDIATE;\n }\n \n \n-static void decode_imm(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_imm(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n if (8 == decode->operand_size) {\n decode_immediate(cpu, decode, op, 4);\n@@ -171,20 +182,23 @@ static void decode_imm(CPUState *cpu, struct x86_decode *decode, struct x86_deco\n op->type = X86_VAR_IMMEDIATE;\n }\n \n-static void decode_imm_signed(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_imm_signed(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n decode_immediate(cpu, decode, op, decode->operand_size);\n op->val = sign(op->val, decode->operand_size);\n op->type = X86_VAR_IMMEDIATE;\n }\n \n-static void decode_imm_1(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_imm_1(CPUState *cpu, struct x86_decode *decode,\n+ 
struct x86_decode_op *op)\n {\n op->type = X86_VAR_IMMEDIATE;\n op->val = 1;\n }\n \n-static void decode_imm_0(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_imm_0(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n op->type = X86_VAR_IMMEDIATE;\n op->val = 0;\n@@ -194,54 +208,54 @@ static void decode_imm_0(CPUState *cpu, struct x86_decode *decode, struct x86_de\n static void decode_pushseg(CPUState *cpu, struct x86_decode *decode)\n {\n uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0];\n- \n+\n decode->op[0].type = X86_VAR_REG;\n switch (op) {\n- case 0xe:\n- decode->op[0].reg = REG_SEG_CS;\n- break;\n- case 0x16:\n- decode->op[0].reg = REG_SEG_SS;\n- break;\n- case 0x1e:\n- decode->op[0].reg = REG_SEG_DS;\n- break;\n- case 0x06:\n- decode->op[0].reg = REG_SEG_ES;\n- break;\n- case 0xa0:\n- decode->op[0].reg = REG_SEG_FS;\n- break;\n- case 0xa8:\n- decode->op[0].reg = REG_SEG_GS;\n- break;\n+ case 0xe:\n+ decode->op[0].reg = REG_SEG_CS;\n+ break;\n+ case 0x16:\n+ decode->op[0].reg = REG_SEG_SS;\n+ break;\n+ case 0x1e:\n+ decode->op[0].reg = REG_SEG_DS;\n+ break;\n+ case 0x06:\n+ decode->op[0].reg = REG_SEG_ES;\n+ break;\n+ case 0xa0:\n+ decode->op[0].reg = REG_SEG_FS;\n+ break;\n+ case 0xa8:\n+ decode->op[0].reg = REG_SEG_GS;\n+ break;\n }\n }\n \n static void decode_popseg(CPUState *cpu, struct x86_decode *decode)\n {\n uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0];\n- \n+\n decode->op[0].type = X86_VAR_REG;\n switch (op) {\n- case 0xf:\n- decode->op[0].reg = REG_SEG_CS;\n- break;\n- case 0x17:\n- decode->op[0].reg = REG_SEG_SS;\n- break;\n- case 0x1f:\n- decode->op[0].reg = REG_SEG_DS;\n- break;\n- case 0x07:\n- decode->op[0].reg = REG_SEG_ES;\n- break;\n- case 0xa1:\n- decode->op[0].reg = REG_SEG_FS;\n- break;\n- case 0xa9:\n- decode->op[0].reg = REG_SEG_GS;\n- break;\n+ case 0xf:\n+ decode->op[0].reg = REG_SEG_CS;\n+ break;\n+ case 0x17:\n+ decode->op[0].reg = REG_SEG_SS;\n+ break;\n+ case 0x1f:\n+ decode->op[0].reg = REG_SEG_DS;\n+ break;\n+ case 0x07:\n+ decode->op[0].reg = REG_SEG_ES;\n+ break;\n+ case 0xa1:\n+ decode->op[0].reg = REG_SEG_FS;\n+ break;\n+ case 0xa9:\n+ decode->op[0].reg = REG_SEG_GS;\n+ break;\n }\n }\n \n@@ -249,36 +263,41 @@ static void decode_incgroup(CPUState *cpu, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0x40;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+ decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->operand_size);\n }\n \n static void decode_decgroup(CPUState *cpu, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0x48;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+ decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->operand_size);\n }\n \n static void decode_incgroup2(CPUState *cpu, struct x86_decode *decode)\n {\n- if (!decode->modrm.reg)\n+ if (!decode->modrm.reg) {\n decode->cmd = X86_DECODE_CMD_INC;\n- else if (1 == decode->modrm.reg)\n+ } else if (1 == decode->modrm.reg) {\n decode->cmd = X86_DECODE_CMD_DEC;\n+ }\n }\n \n static void decode_pushgroup(CPUState *cpu, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0x50;\n- decode->op[0].ptr = get_reg_ref(cpu, 
decode->op[0].reg, decode->rex.b, decode->operand_size);\n+ decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->operand_size);\n }\n \n static void decode_popgroup(CPUState *cpu, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0x58;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+ decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->operand_size);\n }\n \n static void decode_jxx(CPUState *cpu, struct x86_decode *decode)\n@@ -340,18 +359,18 @@ static void decode_f7group(CPUState *cpu, struct x86_decode *decode)\n decode_modrm_rm(cpu, decode, &decode->op[0]);\n \n switch (decode->modrm.reg) {\n- case 0:\n- case 1:\n- decode_imm(cpu, decode, &decode->op[1]);\n- break;\n- case 2:\n- break;\n- case 3:\n- decode->op[1].type = X86_VAR_IMMEDIATE;\n- decode->op[1].val = 0;\n- break;\n- default:\n- break;\n+ case 0:\n+ case 1:\n+ decode_imm(cpu, decode, &decode->op[1]);\n+ break;\n+ case 2:\n+ break;\n+ case 3:\n+ decode->op[1].type = X86_VAR_IMMEDIATE;\n+ decode->op[1].val = 0;\n+ break;\n+ default:\n+ break;\n }\n }\n \n@@ -359,18 +378,21 @@ static void decode_xchgroup(CPUState *cpu, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0x90;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+ decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->operand_size);\n }\n \n static void decode_movgroup(CPUState *cpu, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0xb8;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+ decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->operand_size);\n decode_immediate(cpu, decode, &decode->op[1], decode->operand_size);\n }\n \n-static void fetch_moffs(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void fetch_moffs(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n op->type = X86_VAR_OFFSET;\n op->ptr = decode_bytes(cpu, decode, decode->addressing_size);\n@@ -380,11 +402,13 @@ static void decode_movgroup8(CPUState *cpu, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0xb0;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+ decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->operand_size);\n decode_immediate(cpu, decode, &decode->op[1], decode->operand_size);\n }\n \n-static void decode_rcx(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_rcx(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n op->type = X86_VAR_REG;\n op->reg = REG_RCX;\n@@ -396,10 +420,14 @@ struct decode_tbl {\n enum x86_decode_cmd cmd;\n uint8_t operand_size;\n bool is_modrm;\n- void (*decode_op1)(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op1);\n- void (*decode_op2)(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op2);\n- void (*decode_op3)(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op3);\n- void (*decode_op4)(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op4);\n+ void (*decode_op1)(CPUState *cpu, struct x86_decode *decode,\n+ 
struct x86_decode_op *op1);\n+ void (*decode_op2)(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op2);\n+ void (*decode_op3)(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op3);\n+ void (*decode_op4)(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op4);\n void (*decode_postfix)(CPUState *cpu, struct x86_decode *decode);\n addr_t flags_mask;\n };\n@@ -412,13 +440,16 @@ struct decode_x87_tbl {\n uint8_t operand_size;\n bool rev;\n bool pop;\n- void (*decode_op1)(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op1);\n- void (*decode_op2)(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op2);\n+ void (*decode_op1)(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op1);\n+ void (*decode_op2)(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op2);\n void (*decode_postfix)(CPUState *cpu, struct x86_decode *decode);\n addr_t flags_mask;\n };\n \n-struct decode_tbl invl_inst = {0x0, 0, 0, false, NULL, NULL, NULL, NULL, decode_invalid};\n+struct decode_tbl invl_inst = {0x0, 0, 0, false, NULL, NULL, NULL, NULL,\n+ decode_invalid};\n \n struct decode_tbl _decode_tbl1[255];\n struct decode_tbl _decode_tbl2[255];\n@@ -427,28 +458,35 @@ struct decode_x87_tbl _decode_tbl3[255];\n static void decode_x87_ins(CPUState *cpu, struct x86_decode *decode)\n {\n struct decode_x87_tbl *decoder;\n- \n+\n decode->is_fpu = true;\n int mode = decode->modrm.mod == 3 ? 1 : 0;\n- int index = ((decode->opcode[0] & 0xf) << 4) | (mode << 3) | decode->modrm.reg;\n- \n+ int index = ((decode->opcode[0] & 0xf) << 4) | (mode << 3) |\n+ decode->modrm.reg;\n+\n decoder = &_decode_tbl3[index];\n- \n+\n decode->cmd = decoder->cmd;\n- if (decoder->operand_size)\n+ if (decoder->operand_size) {\n decode->operand_size = decoder->operand_size;\n+ }\n decode->flags_mask = decoder->flags_mask;\n decode->fpop_stack = decoder->pop;\n decode->frev = decoder->rev;\n- \n- if (decoder->decode_op1)\n+\n+ if (decoder->decode_op1) {\n decoder->decode_op1(cpu, decode, &decode->op[0]);\n- if (decoder->decode_op2)\n+ }\n+ if (decoder->decode_op2) {\n decoder->decode_op2(cpu, decode, &decode->op[1]);\n- if (decoder->decode_postfix)\n+ }\n+ if (decoder->decode_postfix) {\n decoder->decode_postfix(cpu, decode);\n- \n- VM_PANIC_ON_EX(!decode->cmd, \"x87 opcode %x %x (%x %x) not decoded\\n\", decode->opcode[0], decode->modrm.modrm, decoder->modrm_reg, decoder->modrm_mod);\n+ }\n+\n+ VM_PANIC_ON_EX(!decode->cmd, \"x87 opcode %x %x (%x %x) not decoded\\n\",\n+ decode->opcode[0], decode->modrm.modrm, decoder->modrm_reg,\n+ decoder->modrm_mod);\n }\n \n static void decode_ffgroup(CPUState *cpu, struct x86_decode *decode)\n@@ -465,8 +503,9 @@ static void decode_ffgroup(CPUState *cpu, struct x86_decode *decode)\n X86_DECODE_CMD_INVL\n };\n decode->cmd = group[decode->modrm.reg];\n- if (decode->modrm.reg > 2)\n+ if (decode->modrm.reg > 2) {\n decode->flags_mask = 0;\n+ }\n }\n \n static void decode_sldtgroup(CPUState *cpu, struct x86_decode *decode)\n@@ -482,7 +521,8 @@ static void decode_sldtgroup(CPUState *cpu, struct x86_decode *decode)\n X86_DECODE_CMD_INVL\n };\n decode->cmd = group[decode->modrm.reg];\n- printf(\"%llx: decode_sldtgroup: %d\\n\", cpu->hvf_x86->fetch_rip, decode->modrm.reg);\n+ printf(\"%llx: decode_sldtgroup: %d\\n\", cpu->hvf_x86->fetch_rip,\n+ decode->modrm.reg);\n }\n \n static void decode_lidtgroup(CPUState *cpu, struct x86_decode *decode)\n@@ -524,28 +564,34 @@ static void decode_x87_general(CPUState *cpu, struct 
x86_decode *decode)\n decode->is_fpu = true;\n }\n \n-static void decode_x87_modrm_floatp(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_x87_modrm_floatp(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n op->type = X87_VAR_FLOATP;\n }\n \n-static void decode_x87_modrm_intp(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_x87_modrm_intp(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n op->type = X87_VAR_INTP;\n }\n \n-static void decode_x87_modrm_bytep(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_x87_modrm_bytep(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n op->type = X87_VAR_BYTEP;\n }\n \n-static void decode_x87_modrm_st0(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_x87_modrm_st0(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n op->type = X87_VAR_REG;\n op->reg = 0;\n }\n \n-static void decode_decode_x87_modrm_st0(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+static void decode_decode_x87_modrm_st0(CPUState *cpu,\n+ struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n op->type = X87_VAR_REG;\n op->reg = decode->modrm.modrm & 7;\n@@ -556,35 +602,35 @@ static void decode_aegroup(CPUState *cpu, struct x86_decode *decode)\n {\n decode->is_fpu = true;\n switch (decode->modrm.reg) {\n- case 0:\n- decode->cmd = X86_DECODE_CMD_FXSAVE;\n- decode_x87_modrm_bytep(cpu, decode, &decode->op[0]);\n- break;\n- case 1:\n- decode_x87_modrm_bytep(cpu, decode, &decode->op[0]);\n- decode->cmd = X86_DECODE_CMD_FXRSTOR;\n- break;\n- case 5:\n- if (decode->modrm.modrm == 0xe8) {\n- decode->cmd = X86_DECODE_CMD_LFENCE;\n- } else {\n- VM_PANIC(\"xrstor\");\n- }\n- break;\n- case 6:\n- VM_PANIC_ON(decode->modrm.modrm != 0xf0);\n- decode->cmd = X86_DECODE_CMD_MFENCE;\n- break;\n- case 7:\n- if (decode->modrm.modrm == 0xf8) {\n- decode->cmd = X86_DECODE_CMD_SFENCE;\n- } else {\n- decode->cmd = X86_DECODE_CMD_CLFLUSH;\n- }\n- break;\n- default:\n- VM_PANIC_ON_EX(1, \"0xae: reg %d\\n\", decode->modrm.reg);\n- break;\n+ case 0:\n+ decode->cmd = X86_DECODE_CMD_FXSAVE;\n+ decode_x87_modrm_bytep(cpu, decode, &decode->op[0]);\n+ break;\n+ case 1:\n+ decode_x87_modrm_bytep(cpu, decode, &decode->op[0]);\n+ decode->cmd = X86_DECODE_CMD_FXRSTOR;\n+ break;\n+ case 5:\n+ if (decode->modrm.modrm == 0xe8) {\n+ decode->cmd = X86_DECODE_CMD_LFENCE;\n+ } else {\n+ VM_PANIC(\"xrstor\");\n+ }\n+ break;\n+ case 6:\n+ VM_PANIC_ON(decode->modrm.modrm != 0xf0);\n+ decode->cmd = X86_DECODE_CMD_MFENCE;\n+ break;\n+ case 7:\n+ if (decode->modrm.modrm == 0xf8) {\n+ decode->cmd = X86_DECODE_CMD_SFENCE;\n+ } else {\n+ decode->cmd = X86_DECODE_CMD_CLFLUSH;\n+ }\n+ break;\n+ default:\n+ VM_PANIC_ON_EX(1, \"0xae: reg %d\\n\", decode->modrm.reg);\n+ break;\n }\n }\n \n@@ -592,568 +638,1003 @@ static void decode_bswap(CPUState *cpu, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[1] - 0xc8;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b, decode->operand_size);\n+ decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->operand_size);\n }\n \n static void decode_d9_4(CPUState *cpu, struct x86_decode *decode)\n {\n- switch(decode->modrm.modrm) {\n- case 0xe0:\n- // FCHS\n- decode->cmd = X86_DECODE_CMD_FCHS;\n- break;\n- case 0xe1:\n- 
decode->cmd = X86_DECODE_CMD_FABS;\n- break;\n- case 0xe4:\n- VM_PANIC_ON_EX(1, \"FTST\");\n- break;\n- case 0xe5:\n- // FXAM\n- decode->cmd = X86_DECODE_CMD_FXAM;\n- break;\n- default:\n- VM_PANIC_ON_EX(1, \"FLDENV\");\n- break;\n+ switch (decode->modrm.modrm) {\n+ case 0xe0:\n+ /* FCHS */\n+ decode->cmd = X86_DECODE_CMD_FCHS;\n+ break;\n+ case 0xe1:\n+ decode->cmd = X86_DECODE_CMD_FABS;\n+ break;\n+ case 0xe4:\n+ VM_PANIC_ON_EX(1, \"FTST\");\n+ break;\n+ case 0xe5:\n+ /* FXAM */\n+ decode->cmd = X86_DECODE_CMD_FXAM;\n+ break;\n+ default:\n+ VM_PANIC_ON_EX(1, \"FLDENV\");\n+ break;\n }\n }\n \n static void decode_db_4(CPUState *cpu, struct x86_decode *decode)\n {\n switch (decode->modrm.modrm) {\n- case 0xe0:\n- VM_PANIC_ON_EX(1, \"unhandled FNENI: %x %x\\n\", decode->opcode[0], decode->modrm.modrm);\n- break;\n- case 0xe1:\n- VM_PANIC_ON_EX(1, \"unhandled FNDISI: %x %x\\n\", decode->opcode[0], decode->modrm.modrm);\n- break;\n- case 0xe2:\n- VM_PANIC_ON_EX(1, \"unhandled FCLEX: %x %x\\n\", decode->opcode[0], decode->modrm.modrm);\n- break;\n- case 0xe3:\n- decode->cmd = X86_DECODE_CMD_FNINIT;\n- break;\n- case 0xe4:\n- decode->cmd = X86_DECODE_CMD_FNSETPM;\n- break;\n- default:\n- VM_PANIC_ON_EX(1, \"unhandled fpu opcode: %x %x\\n\", decode->opcode[0], decode->modrm.modrm);\n- break;\n+ case 0xe0:\n+ VM_PANIC_ON_EX(1, \"unhandled FNENI: %x %x\\n\", decode->opcode[0],\n+ decode->modrm.modrm);\n+ break;\n+ case 0xe1:\n+ VM_PANIC_ON_EX(1, \"unhandled FNDISI: %x %x\\n\", decode->opcode[0],\n+ decode->modrm.modrm);\n+ break;\n+ case 0xe2:\n+ VM_PANIC_ON_EX(1, \"unhandled FCLEX: %x %x\\n\", decode->opcode[0],\n+ decode->modrm.modrm);\n+ break;\n+ case 0xe3:\n+ decode->cmd = X86_DECODE_CMD_FNINIT;\n+ break;\n+ case 0xe4:\n+ decode->cmd = X86_DECODE_CMD_FNSETPM;\n+ break;\n+ default:\n+ VM_PANIC_ON_EX(1, \"unhandled fpu opcode: %x %x\\n\", decode->opcode[0],\n+ decode->modrm.modrm);\n+ break;\n }\n }\n \n \n #define RFLAGS_MASK_NONE 0\n-#define RFLAGS_MASK_OSZAPC (RFLAGS_OF | RFLAGS_SF | RFLAGS_ZF | RFLAGS_AF | RFLAGS_PF | RFLAGS_CF)\n-#define RFLAGS_MASK_LAHF (RFLAGS_SF | RFLAGS_ZF | RFLAGS_AF | RFLAGS_PF | RFLAGS_CF)\n+#define RFLAGS_MASK_OSZAPC (RFLAGS_OF | RFLAGS_SF | RFLAGS_ZF | RFLAGS_AF | \\\n+ RFLAGS_PF | RFLAGS_CF)\n+#define RFLAGS_MASK_LAHF (RFLAGS_SF | RFLAGS_ZF | RFLAGS_AF | RFLAGS_PF | \\\n+ RFLAGS_CF)\n #define RFLAGS_MASK_CF (RFLAGS_CF)\n #define RFLAGS_MASK_IF (RFLAGS_IF)\n #define RFLAGS_MASK_TF (RFLAGS_TF)\n #define RFLAGS_MASK_DF (RFLAGS_DF)\n #define RFLAGS_MASK_ZF (RFLAGS_ZF)\n \n-struct decode_tbl _1op_inst[] =\n-{\n- {0x0, X86_DECODE_CMD_ADD, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x1, X86_DECODE_CMD_ADD, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x2, X86_DECODE_CMD_ADD, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x3, X86_DECODE_CMD_ADD, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x4, X86_DECODE_CMD_ADD, 1, false, decode_rax, decode_imm8, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x5, X86_DECODE_CMD_ADD, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x6, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n- {0x7, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n- {0x8, X86_DECODE_CMD_OR, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- 
{0x9, X86_DECODE_CMD_OR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xa, X86_DECODE_CMD_OR, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xb, X86_DECODE_CMD_OR, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xc, X86_DECODE_CMD_OR, 1, false, decode_rax, decode_imm8, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xd, X86_DECODE_CMD_OR, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0xe, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n- {0xf, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n- \n- {0x10, X86_DECODE_CMD_ADC, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x11, X86_DECODE_CMD_ADC, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x12, X86_DECODE_CMD_ADC, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x13, X86_DECODE_CMD_ADC, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x14, X86_DECODE_CMD_ADC, 1, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x15, X86_DECODE_CMD_ADC, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0x16, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n- {0x17, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n- \n- {0x18, X86_DECODE_CMD_SBB, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x19, X86_DECODE_CMD_SBB, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x1a, X86_DECODE_CMD_SBB, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x1b, X86_DECODE_CMD_SBB, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x1c, X86_DECODE_CMD_SBB, 1, false, decode_rax, decode_imm8, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x1d, X86_DECODE_CMD_SBB, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0x1e, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n- {0x1f, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n- \n- {0x20, X86_DECODE_CMD_AND, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x21, X86_DECODE_CMD_AND, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x22, X86_DECODE_CMD_AND, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x23, X86_DECODE_CMD_AND, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x24, X86_DECODE_CMD_AND, 1, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x25, X86_DECODE_CMD_AND, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x28, X86_DECODE_CMD_SUB, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x29, X86_DECODE_CMD_SUB, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x2a, X86_DECODE_CMD_SUB, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x2b, X86_DECODE_CMD_SUB, 0, true, decode_modrm_reg, 
decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x2c, X86_DECODE_CMD_SUB, 1, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x2d, X86_DECODE_CMD_SUB, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x2f, X86_DECODE_CMD_DAS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x30, X86_DECODE_CMD_XOR, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x31, X86_DECODE_CMD_XOR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x32, X86_DECODE_CMD_XOR, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x33, X86_DECODE_CMD_XOR, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x34, X86_DECODE_CMD_XOR, 1, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x35, X86_DECODE_CMD_XOR, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0x38, X86_DECODE_CMD_CMP, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x39, X86_DECODE_CMD_CMP, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x3a, X86_DECODE_CMD_CMP, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x3b, X86_DECODE_CMD_CMP, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x3c, X86_DECODE_CMD_CMP, 1, false, decode_rax, decode_imm8, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x3d, X86_DECODE_CMD_CMP, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0x3f, X86_DECODE_CMD_AAS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0x40, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n- {0x41, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n- {0x42, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n- {0x43, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n- {0x44, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n- {0x45, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n- {0x46, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n- {0x47, X86_DECODE_CMD_INC, 0, false, NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n- \n- {0x48, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n- {0x49, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n- {0x4a, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n- {0x4b, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n- {0x4c, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n- {0x4d, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n- {0x4e, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n- {0x4f, X86_DECODE_CMD_DEC, 0, false, NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n- \n- {0x50, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n- {0x51, X86_DECODE_CMD_PUSH, 0, 
false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n- {0x52, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n- {0x53, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n- {0x54, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n- {0x55, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n- {0x56, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n- {0x57, X86_DECODE_CMD_PUSH, 0, false, NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n- \n- {0x58, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n- {0x59, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n- {0x5a, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n- {0x5b, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n- {0x5c, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n- {0x5d, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n- {0x5e, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n- {0x5f, X86_DECODE_CMD_POP, 0, false, NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n- \n- {0x60, X86_DECODE_CMD_PUSHA, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x61, X86_DECODE_CMD_POPA, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0x68, X86_DECODE_CMD_PUSH, 0, false, decode_imm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x6a, X86_DECODE_CMD_PUSH, 0, false, decode_imm8_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x69, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg, decode_modrm_rm, decode_imm, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x6b, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg, decode_modrm_rm, decode_imm8_signed, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0x6c, X86_DECODE_CMD_INS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x6d, X86_DECODE_CMD_INS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x6e, X86_DECODE_CMD_OUTS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x6f, X86_DECODE_CMD_OUTS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0x70, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x71, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x72, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x73, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x74, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x75, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x76, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x77, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x78, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x79, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x7a, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x7b, 
X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x7c, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x7d, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x7e, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x7f, X86_DECODE_CMD_JXX, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- \n- {0x80, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8, NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},\n- {0x81, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm, NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},\n- {0x82, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8, NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},\n- {0x83, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8_signed, NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},\n- {0x84, X86_DECODE_CMD_TST, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x85, X86_DECODE_CMD_TST, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0x86, X86_DECODE_CMD_XCHG, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x87, X86_DECODE_CMD_XCHG, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x88, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x89, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x8a, X86_DECODE_CMD_MOV, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x8b, X86_DECODE_CMD_MOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x8c, X86_DECODE_CMD_MOV_FROM_SEG, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x8d, X86_DECODE_CMD_LEA, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x8e, X86_DECODE_CMD_MOV_TO_SEG, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x8f, X86_DECODE_CMD_POP, 0, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0x90, X86_DECODE_CMD_NOP, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x91, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n- {0x92, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n- {0x93, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n- {0x94, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n- {0x95, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n- {0x96, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n- {0x97, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n- \n- {0x98, X86_DECODE_CMD_CBW, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x99, X86_DECODE_CMD_CWD, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0x9a, X86_DECODE_CMD_CALL_FAR, 0, false, NULL, NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE},\n- \n- {0x9c, X86_DECODE_CMD_PUSHF, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- //{0x9d, 
X86_DECODE_CMD_POPF, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_POPF},\n- {0x9e, X86_DECODE_CMD_SAHF, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x9f, X86_DECODE_CMD_LAHF, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_LAHF},\n- \n- {0xa0, X86_DECODE_CMD_MOV, 1, false, decode_rax, fetch_moffs, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xa1, X86_DECODE_CMD_MOV, 0, false, decode_rax, fetch_moffs, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xa2, X86_DECODE_CMD_MOV, 1, false, fetch_moffs, decode_rax, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xa3, X86_DECODE_CMD_MOV, 0, false, fetch_moffs, decode_rax, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xa4, X86_DECODE_CMD_MOVS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xa5, X86_DECODE_CMD_MOVS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xa6, X86_DECODE_CMD_CMPS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xa7, X86_DECODE_CMD_CMPS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xaa, X86_DECODE_CMD_STOS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xab, X86_DECODE_CMD_STOS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xac, X86_DECODE_CMD_LODS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xad, X86_DECODE_CMD_LODS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xae, X86_DECODE_CMD_SCAS, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xaf, X86_DECODE_CMD_SCAS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0xa8, X86_DECODE_CMD_TST, 1, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xa9, X86_DECODE_CMD_TST, 0, false, decode_rax, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0xb0, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n- {0xb1, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n- {0xb2, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n- {0xb3, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n- {0xb4, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n- {0xb5, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n- {0xb6, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n- {0xb7, X86_DECODE_CMD_MOV, 1, false, NULL, NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n- \n- {0xb8, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n- {0xb9, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n- {0xba, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n- {0xbb, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n- {0xbc, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n- {0xbd, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n- {0xbe, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n- {0xbf, X86_DECODE_CMD_MOV, 0, false, NULL, NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n- \n- {0xc0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8, NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n- {0xc1, 
X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8, NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n- \n- {0xc2, X86_DECODE_RET_NEAR, 0, false, decode_imm16, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xc3, X86_DECODE_RET_NEAR, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xc4, X86_DECODE_CMD_LES, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xc5, X86_DECODE_CMD_LDS, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xc6, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_imm8, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xc7, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_imm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xc8, X86_DECODE_CMD_ENTER, 0, false, decode_imm16, decode_imm8, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xc9, X86_DECODE_CMD_LEAVE, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xca, X86_DECODE_RET_FAR, 0, false, decode_imm16, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xcb, X86_DECODE_RET_FAR, 0, false, decode_imm_0, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xcd, X86_DECODE_CMD_INT, 0, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- //{0xcf, X86_DECODE_CMD_IRET, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IRET},\n- \n- {0xd0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm_1, NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n- {0xd1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm_1, NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n- {0xd2, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_rcx, NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n- {0xd3, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_rcx, NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n- \n- {0xd4, X86_DECODE_CMD_AAM, 0, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xd5, X86_DECODE_CMD_AAD, 0, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0xd7, X86_DECODE_CMD_XLAT, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xd8, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n- {0xd9, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n- {0xda, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n- {0xdb, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n- {0xdc, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n- {0xdd, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n- {0xde, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n- {0xdf, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n- \n- {0xe0, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xe1, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xe2, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xe3, X86_DECODE_CMD_JCXZ, 1, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- \n- {0xe4, X86_DECODE_CMD_IN, 1, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xe5, X86_DECODE_CMD_IN, 0, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xe6, X86_DECODE_CMD_OUT, 1, false, 
decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xe7, X86_DECODE_CMD_OUT, 0, false, decode_imm8, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xe8, X86_DECODE_CMD_CALL_NEAR, 0, false, decode_imm_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xe9, X86_DECODE_CMD_JMP_NEAR, 0, false, decode_imm_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xea, X86_DECODE_CMD_JMP_FAR, 0, false, NULL, NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE},\n- {0xeb, X86_DECODE_CMD_JMP_NEAR, 1, false, decode_imm8_signed, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xec, X86_DECODE_CMD_IN, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xed, X86_DECODE_CMD_IN, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xee, X86_DECODE_CMD_OUT, 1, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xef, X86_DECODE_CMD_OUT, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xf4, X86_DECODE_CMD_HLT, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xf5, X86_DECODE_CMD_CMC, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},\n- \n- {0xf6, X86_DECODE_CMD_INVL, 1, true, NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC},\n- {0xf7, X86_DECODE_CMD_INVL, 0, true, NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC},\n- \n- {0xf8, X86_DECODE_CMD_CLC, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},\n- {0xf9, X86_DECODE_CMD_STC, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},\n- \n- {0xfa, X86_DECODE_CMD_CLI, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF},\n- {0xfb, X86_DECODE_CMD_STI, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF},\n- {0xfc, X86_DECODE_CMD_CLD, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF},\n- {0xfd, X86_DECODE_CMD_STD, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF},\n- {0xfe, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, NULL, NULL, NULL, decode_incgroup2, RFLAGS_MASK_OSZAPC},\n- {0xff, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, NULL, NULL, NULL, decode_ffgroup, RFLAGS_MASK_OSZAPC},\n+struct decode_tbl _1op_inst[] = {\n+ {0x0, X86_DECODE_CMD_ADD, 1, true, decode_modrm_rm, decode_modrm_reg, NULL,\n+ NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x1, X86_DECODE_CMD_ADD, 0, true, decode_modrm_rm, decode_modrm_reg, NULL,\n+ NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x2, X86_DECODE_CMD_ADD, 1, true, decode_modrm_reg, decode_modrm_rm, NULL,\n+ NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x3, X86_DECODE_CMD_ADD, 0, true, decode_modrm_reg, decode_modrm_rm, NULL,\n+ NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x4, X86_DECODE_CMD_ADD, 1, false, decode_rax, decode_imm8, NULL, NULL,\n+ NULL, RFLAGS_MASK_OSZAPC},\n+ {0x5, X86_DECODE_CMD_ADD, 0, false, decode_rax, decode_imm, NULL, NULL,\n+ NULL, RFLAGS_MASK_OSZAPC},\n+ {0x6, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL,\n+ decode_pushseg, RFLAGS_MASK_NONE},\n+ {0x7, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL,\n+ decode_popseg, RFLAGS_MASK_NONE},\n+ {0x8, X86_DECODE_CMD_OR, 1, true, decode_modrm_rm, decode_modrm_reg, NULL,\n+ NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x9, X86_DECODE_CMD_OR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL,\n+ NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xa, X86_DECODE_CMD_OR, 1, true, decode_modrm_reg, decode_modrm_rm, NULL,\n+ NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xb, X86_DECODE_CMD_OR, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xc, X86_DECODE_CMD_OR, 1, false, decode_rax, decode_imm8,\n+ NULL, NULL, NULL, 
RFLAGS_MASK_OSZAPC},\n+ {0xd, X86_DECODE_CMD_OR, 0, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0xe, X86_DECODE_CMD_PUSH_SEG, 0, false, false,\n+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n+ {0xf, X86_DECODE_CMD_POP_SEG, 0, false, false,\n+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n+\n+ {0x10, X86_DECODE_CMD_ADC, 1, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x11, X86_DECODE_CMD_ADC, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x12, X86_DECODE_CMD_ADC, 1, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x13, X86_DECODE_CMD_ADC, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x14, X86_DECODE_CMD_ADC, 1, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x15, X86_DECODE_CMD_ADC, 0, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0x16, X86_DECODE_CMD_PUSH_SEG, 0, false, false,\n+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n+ {0x17, X86_DECODE_CMD_POP_SEG, 0, false, false,\n+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n+\n+ {0x18, X86_DECODE_CMD_SBB, 1, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x19, X86_DECODE_CMD_SBB, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x1a, X86_DECODE_CMD_SBB, 1, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x1b, X86_DECODE_CMD_SBB, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x1c, X86_DECODE_CMD_SBB, 1, false, decode_rax, decode_imm8,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x1d, X86_DECODE_CMD_SBB, 0, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0x1e, X86_DECODE_CMD_PUSH_SEG, 0, false, false,\n+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n+ {0x1f, X86_DECODE_CMD_POP_SEG, 0, false, false,\n+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n+\n+ {0x20, X86_DECODE_CMD_AND, 1, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x21, X86_DECODE_CMD_AND, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x22, X86_DECODE_CMD_AND, 1, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x23, X86_DECODE_CMD_AND, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x24, X86_DECODE_CMD_AND, 1, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x25, X86_DECODE_CMD_AND, 0, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x28, X86_DECODE_CMD_SUB, 1, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x29, X86_DECODE_CMD_SUB, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x2a, X86_DECODE_CMD_SUB, 1, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x2b, X86_DECODE_CMD_SUB, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x2c, X86_DECODE_CMD_SUB, 1, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x2d, X86_DECODE_CMD_SUB, 0, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x2f, X86_DECODE_CMD_DAS, 0, false,\n+ NULL, NULL, NULL, NULL, 
NULL, RFLAGS_MASK_OSZAPC},\n+ {0x30, X86_DECODE_CMD_XOR, 1, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x31, X86_DECODE_CMD_XOR, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x32, X86_DECODE_CMD_XOR, 1, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x33, X86_DECODE_CMD_XOR, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x34, X86_DECODE_CMD_XOR, 1, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x35, X86_DECODE_CMD_XOR, 0, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0x38, X86_DECODE_CMD_CMP, 1, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x39, X86_DECODE_CMD_CMP, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x3a, X86_DECODE_CMD_CMP, 1, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x3b, X86_DECODE_CMD_CMP, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x3c, X86_DECODE_CMD_CMP, 1, false, decode_rax, decode_imm8,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x3d, X86_DECODE_CMD_CMP, 0, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0x3f, X86_DECODE_CMD_AAS, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0x40, X86_DECODE_CMD_INC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+ {0x41, X86_DECODE_CMD_INC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+ {0x42, X86_DECODE_CMD_INC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+ {0x43, X86_DECODE_CMD_INC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+ {0x44, X86_DECODE_CMD_INC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+ {0x45, X86_DECODE_CMD_INC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+ {0x46, X86_DECODE_CMD_INC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+ {0x47, X86_DECODE_CMD_INC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},\n+\n+ {0x48, X86_DECODE_CMD_DEC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+ {0x49, X86_DECODE_CMD_DEC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+ {0x4a, X86_DECODE_CMD_DEC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+ {0x4b, X86_DECODE_CMD_DEC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+ {0x4c, X86_DECODE_CMD_DEC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+ {0x4d, X86_DECODE_CMD_DEC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+ {0x4e, X86_DECODE_CMD_DEC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+ {0x4f, X86_DECODE_CMD_DEC, 0, false,\n+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},\n+\n+ {0x50, X86_DECODE_CMD_PUSH, 0, false,\n+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+ {0x51, X86_DECODE_CMD_PUSH, 0, false,\n+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+ {0x52, X86_DECODE_CMD_PUSH, 0, false,\n+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+ {0x53, X86_DECODE_CMD_PUSH, 0, false,\n+ NULL, NULL, NULL, NULL, 
decode_pushgroup, RFLAGS_MASK_NONE},\n+ {0x54, X86_DECODE_CMD_PUSH, 0, false,\n+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+ {0x55, X86_DECODE_CMD_PUSH, 0, false,\n+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+ {0x56, X86_DECODE_CMD_PUSH, 0, false,\n+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+ {0x57, X86_DECODE_CMD_PUSH, 0, false,\n+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},\n+\n+ {0x58, X86_DECODE_CMD_POP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+ {0x59, X86_DECODE_CMD_POP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+ {0x5a, X86_DECODE_CMD_POP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+ {0x5b, X86_DECODE_CMD_POP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+ {0x5c, X86_DECODE_CMD_POP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+ {0x5d, X86_DECODE_CMD_POP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+ {0x5e, X86_DECODE_CMD_POP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+ {0x5f, X86_DECODE_CMD_POP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},\n+\n+ {0x60, X86_DECODE_CMD_PUSHA, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x61, X86_DECODE_CMD_POPA, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0x68, X86_DECODE_CMD_PUSH, 0, false, decode_imm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x6a, X86_DECODE_CMD_PUSH, 0, false, decode_imm8_signed,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x69, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg,\n+ decode_modrm_rm, decode_imm, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x6b, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ decode_imm8_signed, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0x6c, X86_DECODE_CMD_INS, 1, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x6d, X86_DECODE_CMD_INS, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x6e, X86_DECODE_CMD_OUTS, 1, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x6f, X86_DECODE_CMD_OUTS, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0x70, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x71, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x72, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x73, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x74, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x75, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x76, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x77, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x78, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x79, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x7a, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x7b, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x7c, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, 
NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x7d, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x7e, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x7f, X86_DECODE_CMD_JXX, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+\n+ {0x80, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,\n+ NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},\n+ {0x81, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm,\n+ NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},\n+ {0x82, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,\n+ NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},\n+ {0x83, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8_signed,\n+ NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},\n+ {0x84, X86_DECODE_CMD_TST, 1, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x85, X86_DECODE_CMD_TST, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0x86, X86_DECODE_CMD_XCHG, 1, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x87, X86_DECODE_CMD_XCHG, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x88, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x89, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x8a, X86_DECODE_CMD_MOV, 1, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x8b, X86_DECODE_CMD_MOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x8c, X86_DECODE_CMD_MOV_FROM_SEG, 0, true, decode_modrm_rm,\n+ decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x8d, X86_DECODE_CMD_LEA, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x8e, X86_DECODE_CMD_MOV_TO_SEG, 0, true, decode_modrm_reg,\n+ decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x8f, X86_DECODE_CMD_POP, 0, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0x90, X86_DECODE_CMD_NOP, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x91, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,\n+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+ {0x92, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,\n+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+ {0x93, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,\n+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+ {0x94, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,\n+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+ {0x95, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,\n+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+ {0x96, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,\n+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+ {0x97, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,\n+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},\n+\n+ {0x98, X86_DECODE_CMD_CBW, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x99, X86_DECODE_CMD_CWD, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0x9a, X86_DECODE_CMD_CALL_FAR, 0, false, NULL,\n+ NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE},\n+\n+ {0x9c, X86_DECODE_CMD_PUSHF, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ /*{0x9d, X86_DECODE_CMD_POPF, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, 
RFLAGS_MASK_POPF},*/\n+ {0x9e, X86_DECODE_CMD_SAHF, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x9f, X86_DECODE_CMD_LAHF, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_LAHF},\n+\n+ {0xa0, X86_DECODE_CMD_MOV, 1, false, decode_rax, fetch_moffs,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xa1, X86_DECODE_CMD_MOV, 0, false, decode_rax, fetch_moffs,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xa2, X86_DECODE_CMD_MOV, 1, false, fetch_moffs, decode_rax,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xa3, X86_DECODE_CMD_MOV, 0, false, fetch_moffs, decode_rax,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xa4, X86_DECODE_CMD_MOVS, 1, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xa5, X86_DECODE_CMD_MOVS, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xa6, X86_DECODE_CMD_CMPS, 1, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xa7, X86_DECODE_CMD_CMPS, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xaa, X86_DECODE_CMD_STOS, 1, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xab, X86_DECODE_CMD_STOS, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xac, X86_DECODE_CMD_LODS, 1, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xad, X86_DECODE_CMD_LODS, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xae, X86_DECODE_CMD_SCAS, 1, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xaf, X86_DECODE_CMD_SCAS, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0xa8, X86_DECODE_CMD_TST, 1, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xa9, X86_DECODE_CMD_TST, 0, false, decode_rax, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0xb0, X86_DECODE_CMD_MOV, 1, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+ {0xb1, X86_DECODE_CMD_MOV, 1, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+ {0xb2, X86_DECODE_CMD_MOV, 1, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+ {0xb3, X86_DECODE_CMD_MOV, 1, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+ {0xb4, X86_DECODE_CMD_MOV, 1, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+ {0xb5, X86_DECODE_CMD_MOV, 1, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+ {0xb6, X86_DECODE_CMD_MOV, 1, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+ {0xb7, X86_DECODE_CMD_MOV, 1, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},\n+\n+ {0xb8, X86_DECODE_CMD_MOV, 0, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+ {0xb9, X86_DECODE_CMD_MOV, 0, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+ {0xba, X86_DECODE_CMD_MOV, 0, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+ {0xbb, X86_DECODE_CMD_MOV, 0, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+ {0xbc, X86_DECODE_CMD_MOV, 0, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+ {0xbd, X86_DECODE_CMD_MOV, 0, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+ {0xbe, X86_DECODE_CMD_MOV, 0, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+ {0xbf, X86_DECODE_CMD_MOV, 0, false, NULL,\n+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},\n+\n+ {0xc0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,\n+ NULL, NULL, decode_rotgroup, 
RFLAGS_MASK_OSZAPC},\n+ {0xc1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8,\n+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n+\n+ {0xc2, X86_DECODE_RET_NEAR, 0, false, decode_imm16,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xc3, X86_DECODE_RET_NEAR, 0, false, NULL,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xc4, X86_DECODE_CMD_LES, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xc5, X86_DECODE_CMD_LDS, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xc6, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_imm8,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xc7, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_imm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xc8, X86_DECODE_CMD_ENTER, 0, false, decode_imm16, decode_imm8,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xc9, X86_DECODE_CMD_LEAVE, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xca, X86_DECODE_RET_FAR, 0, false, decode_imm16, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xcb, X86_DECODE_RET_FAR, 0, false, decode_imm_0, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xcd, X86_DECODE_CMD_INT, 0, false, decode_imm8, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ /*{0xcf, X86_DECODE_CMD_IRET, 0, false, NULL, NULL,\n+ NULL, NULL, NULL, RFLAGS_MASK_IRET},*/\n+\n+ {0xd0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm_1,\n+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n+ {0xd1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm_1,\n+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n+ {0xd2, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_rcx,\n+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n+ {0xd3, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_rcx,\n+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},\n+\n+ {0xd4, X86_DECODE_CMD_AAM, 0, false, decode_imm8,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xd5, X86_DECODE_CMD_AAD, 0, false, decode_imm8,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0xd7, X86_DECODE_CMD_XLAT, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xd8, X86_DECODE_CMD_INVL, 0, true, NULL,\n+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+ {0xd9, X86_DECODE_CMD_INVL, 0, true, NULL,\n+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+ {0xda, X86_DECODE_CMD_INVL, 0, true, NULL,\n+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+ {0xdb, X86_DECODE_CMD_INVL, 0, true, NULL,\n+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+ {0xdc, X86_DECODE_CMD_INVL, 0, true, NULL,\n+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+ {0xdd, X86_DECODE_CMD_INVL, 0, true, NULL,\n+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+ {0xde, X86_DECODE_CMD_INVL, 0, true, NULL,\n+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+ {0xdf, X86_DECODE_CMD_INVL, 0, true, NULL,\n+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},\n+\n+ {0xe0, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xe1, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xe2, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xe3, X86_DECODE_CMD_JCXZ, 1, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+\n+ {0xe4, X86_DECODE_CMD_IN, 1, false, decode_imm8,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xe5, 
X86_DECODE_CMD_IN, 0, false, decode_imm8,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xe6, X86_DECODE_CMD_OUT, 1, false, decode_imm8,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xe7, X86_DECODE_CMD_OUT, 0, false, decode_imm8,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xe8, X86_DECODE_CMD_CALL_NEAR, 0, false, decode_imm_signed,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xe9, X86_DECODE_CMD_JMP_NEAR, 0, false, decode_imm_signed,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xea, X86_DECODE_CMD_JMP_FAR, 0, false,\n+ NULL, NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE},\n+ {0xeb, X86_DECODE_CMD_JMP_NEAR, 1, false, decode_imm8_signed,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xec, X86_DECODE_CMD_IN, 1, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xed, X86_DECODE_CMD_IN, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xee, X86_DECODE_CMD_OUT, 1, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xef, X86_DECODE_CMD_OUT, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xf4, X86_DECODE_CMD_HLT, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xf5, X86_DECODE_CMD_CMC, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},\n+\n+ {0xf6, X86_DECODE_CMD_INVL, 1, true,\n+ NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC},\n+ {0xf7, X86_DECODE_CMD_INVL, 0, true,\n+ NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC},\n+\n+ {0xf8, X86_DECODE_CMD_CLC, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},\n+ {0xf9, X86_DECODE_CMD_STC, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},\n+\n+ {0xfa, X86_DECODE_CMD_CLI, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF},\n+ {0xfb, X86_DECODE_CMD_STI, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF},\n+ {0xfc, X86_DECODE_CMD_CLD, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF},\n+ {0xfd, X86_DECODE_CMD_STD, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF},\n+ {0xfe, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, decode_incgroup2, RFLAGS_MASK_OSZAPC},\n+ {0xff, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,\n+ NULL, NULL, NULL, decode_ffgroup, RFLAGS_MASK_OSZAPC},\n };\n \n-struct decode_tbl _2op_inst[] =\n-{\n- {0x0, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, NULL, NULL, NULL, decode_sldtgroup, RFLAGS_MASK_NONE},\n- {0x1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, NULL, NULL, NULL, decode_lidtgroup, RFLAGS_MASK_NONE},\n- {0x6, X86_DECODE_CMD_CLTS, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_TF},\n- {0x9, X86_DECODE_CMD_WBINVD, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x18, X86_DECODE_CMD_PREFETCH, 0, true, NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE},\n- {0x1f, X86_DECODE_CMD_NOP, 0, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x20, X86_DECODE_CMD_MOV_FROM_CR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x21, X86_DECODE_CMD_MOV_FROM_DR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x22, X86_DECODE_CMD_MOV_TO_CR, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x23, X86_DECODE_CMD_MOV_TO_DR, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x30, X86_DECODE_CMD_WRMSR, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x31, X86_DECODE_CMD_RDTSC, 0, false, NULL, NULL, NULL, NULL, NULL, 
RFLAGS_MASK_NONE},\n- {0x32, X86_DECODE_CMD_RDMSR, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x40, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x41, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x42, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x43, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x44, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x45, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x46, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x47, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x48, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x49, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x4a, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x4b, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x4c, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x4d, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x4e, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x4f, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x77, X86_DECODE_CMD_EMMS, 0, false, NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE},\n- {0x82, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x83, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x84, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x85, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x86, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x87, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x88, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x89, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x8a, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x8b, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x8c, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x8d, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x8e, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x8f, X86_DECODE_CMD_JXX, 0, false, NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n- {0x90, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x91, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x92, 
X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x93, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x94, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x95, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x96, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x97, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x98, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x99, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x9a, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x9b, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x9c, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x9d, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x9e, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0x9f, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xb0, X86_DECODE_CMD_CMPXCHG, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xb1, X86_DECODE_CMD_CMPXCHG, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xb6, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xb7, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xb8, X86_DECODE_CMD_POPCNT, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xbe, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xbf, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xa0, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n- {0xa1, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n- {0xa2, X86_DECODE_CMD_CPUID, 0, false, NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xa3, X86_DECODE_CMD_BT, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_CF},\n- {0xa4, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg, decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xa5, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg, decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xa8, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n- {0xa9, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n- {0xab, X86_DECODE_CMD_BTS, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_CF},\n- {0xac, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg, decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xad, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg, decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0xae, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, NULL, NULL, NULL, decode_aegroup, RFLAGS_MASK_NONE},\n- \n- {0xaf, 
X86_DECODE_CMD_IMUL_2, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xb2, X86_DECODE_CMD_LSS, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xb3, X86_DECODE_CMD_BTR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xba, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8, NULL, NULL, decode_btgroup, RFLAGS_MASK_OSZAPC},\n- {0xbb, X86_DECODE_CMD_BTC, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xbc, X86_DECODE_CMD_BSF, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- {0xbd, X86_DECODE_CMD_BSR, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0xc1, X86_DECODE_CMD_XADD, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n- \n- {0xc7, X86_DECODE_CMD_CMPXCHG8B, 0, true, decode_modrm_rm, NULL, NULL, NULL, NULL, RFLAGS_MASK_ZF},\n- \n- {0xc8, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n- {0xc9, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n- {0xca, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n- {0xcb, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n- {0xcc, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n- {0xcd, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n- {0xce, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n- {0xcf, X86_DECODE_CMD_BSWAP, 0, false, NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+struct decode_tbl _2op_inst[] = {\n+ {0x0, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,\n+ NULL, NULL, NULL, decode_sldtgroup, RFLAGS_MASK_NONE},\n+ {0x1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,\n+ NULL, NULL, NULL, decode_lidtgroup, RFLAGS_MASK_NONE},\n+ {0x6, X86_DECODE_CMD_CLTS, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_TF},\n+ {0x9, X86_DECODE_CMD_WBINVD, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x18, X86_DECODE_CMD_PREFETCH, 0, true,\n+ NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE},\n+ {0x1f, X86_DECODE_CMD_NOP, 0, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x20, X86_DECODE_CMD_MOV_FROM_CR, 0, true, decode_modrm_rm,\n+ decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x21, X86_DECODE_CMD_MOV_FROM_DR, 0, true, decode_modrm_rm,\n+ decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x22, X86_DECODE_CMD_MOV_TO_CR, 0, true, decode_modrm_reg,\n+ decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x23, X86_DECODE_CMD_MOV_TO_DR, 0, true, decode_modrm_reg,\n+ decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x30, X86_DECODE_CMD_WRMSR, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x31, X86_DECODE_CMD_RDTSC, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x32, X86_DECODE_CMD_RDMSR, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x40, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x41, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x42, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, 
NULL, RFLAGS_MASK_NONE},\n+ {0x43, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x44, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x45, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x46, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x47, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x48, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x49, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x4a, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x4b, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x4c, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x4d, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x4e, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x4f, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x77, X86_DECODE_CMD_EMMS, 0, false,\n+ NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE},\n+ {0x82, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x83, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x84, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x85, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x86, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x87, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x88, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x89, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x8a, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x8b, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x8c, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x8d, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x8e, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x8f, X86_DECODE_CMD_JXX, 0, false,\n+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},\n+ {0x90, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x91, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x92, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x93, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x94, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x95, X86_DECODE_CMD_SETXX, 1, 
true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x96, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x97, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x98, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x99, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x9a, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x9b, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x9c, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x9d, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x9e, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0x9f, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xb0, X86_DECODE_CMD_CMPXCHG, 1, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xb1, X86_DECODE_CMD_CMPXCHG, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xb6, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xb7, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xb8, X86_DECODE_CMD_POPCNT, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xbe, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xbf, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xa0, X86_DECODE_CMD_PUSH_SEG, 0, false, false,\n+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n+ {0xa1, X86_DECODE_CMD_POP_SEG, 0, false, false,\n+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n+ {0xa2, X86_DECODE_CMD_CPUID, 0, false,\n+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xa3, X86_DECODE_CMD_BT, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_CF},\n+ {0xa4, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xa5, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xa8, X86_DECODE_CMD_PUSH_SEG, 0, false, false,\n+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},\n+ {0xa9, X86_DECODE_CMD_POP_SEG, 0, false, false,\n+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},\n+ {0xab, X86_DECODE_CMD_BTS, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_CF},\n+ {0xac, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xad, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0xae, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,\n+ NULL, NULL, NULL, decode_aegroup, RFLAGS_MASK_NONE},\n+\n+ {0xaf, X86_DECODE_CMD_IMUL_2, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xb2, X86_DECODE_CMD_LSS, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xb3, X86_DECODE_CMD_BTR, 
0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xba, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8,\n+ NULL, NULL, decode_btgroup, RFLAGS_MASK_OSZAPC},\n+ {0xbb, X86_DECODE_CMD_BTC, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xbc, X86_DECODE_CMD_BSF, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+ {0xbd, X86_DECODE_CMD_BSR, 0, true, decode_modrm_reg, decode_modrm_rm,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0xc1, X86_DECODE_CMD_XADD, 0, true, decode_modrm_rm, decode_modrm_reg,\n+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},\n+\n+ {0xc7, X86_DECODE_CMD_CMPXCHG8B, 0, true, decode_modrm_rm,\n+ NULL, NULL, NULL, NULL, RFLAGS_MASK_ZF},\n+\n+ {0xc8, X86_DECODE_CMD_BSWAP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+ {0xc9, X86_DECODE_CMD_BSWAP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+ {0xca, X86_DECODE_CMD_BSWAP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+ {0xcb, X86_DECODE_CMD_BSWAP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+ {0xcc, X86_DECODE_CMD_BSWAP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+ {0xcd, X86_DECODE_CMD_BSWAP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+ {0xce, X86_DECODE_CMD_BSWAP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n+ {0xcf, X86_DECODE_CMD_BSWAP, 0, false,\n+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},\n };\n \n-struct decode_x87_tbl invl_inst_x87 = {0x0, 0, 0, 0, 0, false, false, NULL, NULL, decode_invalid, 0};\n-\n-struct decode_x87_tbl _x87_inst[] =\n-{\n- {0xd8, 0, 3, X86_DECODE_CMD_FADD, 10, false, false, decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xd8, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n- {0xd8, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false, decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xd8, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n- {0xd8, 4, 3, X86_DECODE_CMD_FSUB, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xd8, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n- {0xd8, 5, 3, X86_DECODE_CMD_FSUB, 10, true, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xd8, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n- {0xd8, 6, 3, X86_DECODE_CMD_FDIV, 10, false, false, decode_x87_modrm_st0,decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xd8, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n- {0xd8, 7, 3, X86_DECODE_CMD_FDIV, 10, true, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xd8, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n- \n- {0xd9, 0, 3, X86_DECODE_CMD_FLD, 10, false, false, decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xd9, 0, 0, X86_DECODE_CMD_FLD, 4, false, false, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xd9, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, 
decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xd9, 1, 0, X86_DECODE_CMD_INVL, 10, false, false, decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xd9, 2, 3, X86_DECODE_CMD_INVL, 10, false, false, decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xd9, 2, 0, X86_DECODE_CMD_FST, 4, false, false, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xd9, 3, 3, X86_DECODE_CMD_INVL, 10, false, false, decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xd9, 3, 0, X86_DECODE_CMD_FST, 4, false, true, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xd9, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, decode_x87_modrm_st0, NULL, decode_d9_4, RFLAGS_MASK_NONE},\n- {0xd9, 4, 0, X86_DECODE_CMD_INVL, 4, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xd9, 5, 3, X86_DECODE_CMD_FLDxx, 10, false, false, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xd9, 5, 0, X86_DECODE_CMD_FLDCW, 2, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n- //\n- {0xd9, 7, 3, X86_DECODE_CMD_FNSTCW, 2, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xd9, 7, 0, X86_DECODE_CMD_FNSTCW, 2, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xda, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xda, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n- {0xda, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xda, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n- {0xda, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xda, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xda, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xda, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n- {0xda, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xda, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n- {0xda, 6, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xda, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n- {0xda, 7, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xda, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n- \n- {0xdb, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdb, 0, 0, X86_DECODE_CMD_FLD, 4, false, false, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdb, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdb, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdb, 2, 0, X86_DECODE_CMD_FST, 4, false, false, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdb, 3, 3, 
X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdb, 3, 0, X86_DECODE_CMD_FST, 4, false, true, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdb, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, decode_db_4, RFLAGS_MASK_NONE},\n- {0xdb, 4, 0, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdb, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdb, 5, 0, X86_DECODE_CMD_FLD, 10, false, false, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdb, 7, 0, X86_DECODE_CMD_FST, 10, false, true, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xdc, 0, 3, X86_DECODE_CMD_FADD, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdc, 0, 0, X86_DECODE_CMD_FADD, 8, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n- {0xdc, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdc, 1, 0, X86_DECODE_CMD_FMUL, 8, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n- {0xdc, 4, 3, X86_DECODE_CMD_FSUB, 10, true, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdc, 4, 0, X86_DECODE_CMD_FSUB, 8, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n- {0xdc, 5, 3, X86_DECODE_CMD_FSUB, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdc, 5, 0, X86_DECODE_CMD_FSUB, 8, true, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n- {0xdc, 6, 3, X86_DECODE_CMD_FDIV, 10, true, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdc, 6, 0, X86_DECODE_CMD_FDIV, 8, false, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n- {0xdc, 7, 3, X86_DECODE_CMD_FDIV, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdc, 7, 0, X86_DECODE_CMD_FDIV, 8, true, false, decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n- \n- {0xdd, 0, 0, X86_DECODE_CMD_FLD, 8, false, false, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdd, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdd, 2, 3, X86_DECODE_CMD_FST, 10, false, false, decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdd, 2, 0, X86_DECODE_CMD_FST, 8, false, false, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdd, 3, 3, X86_DECODE_CMD_FST, 10, false, true, decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdd, 3, 0, X86_DECODE_CMD_FST, 8, false, true, decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdd, 4, 3, X86_DECODE_CMD_FUCOM, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdd, 4, 0, X86_DECODE_CMD_FRSTOR, 8, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdd, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdd, 7, 0, X86_DECODE_CMD_FNSTSW, 0, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdd, 7, 3, X86_DECODE_CMD_FNSTSW, 0, false, false, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n- \n- {0xde, 0, 3, X86_DECODE_CMD_FADD, 10, false, 
true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xde, 0, 0, X86_DECODE_CMD_FADD, 2, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n- {0xde, 1, 3, X86_DECODE_CMD_FMUL, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xde, 1, 0, X86_DECODE_CMD_FMUL, 2, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n- {0xde, 4, 3, X86_DECODE_CMD_FSUB, 10, true, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xde, 4, 0, X86_DECODE_CMD_FSUB, 2, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n- {0xde, 5, 3, X86_DECODE_CMD_FSUB, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xde, 5, 0, X86_DECODE_CMD_FSUB, 2, true, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n- {0xde, 6, 3, X86_DECODE_CMD_FDIV, 10, true, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xde, 6, 0, X86_DECODE_CMD_FDIV, 2, false, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n- {0xde, 7, 3, X86_DECODE_CMD_FDIV, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xde, 7, 0, X86_DECODE_CMD_FDIV, 2, true, false, decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n- \n- {0xdf, 0, 0, X86_DECODE_CMD_FLD, 2, false, false, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdf, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdf, 2, 3, X86_DECODE_CMD_FST, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdf, 2, 0, X86_DECODE_CMD_FST, 2, false, false, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdf, 3, 3, X86_DECODE_CMD_FST, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdf, 3, 0, X86_DECODE_CMD_FST, 2, false, true, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdf, 4, 3, X86_DECODE_CMD_FNSTSW, 2, false, true, decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdf, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, true, decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n- {0xdf, 5, 0, X86_DECODE_CMD_FLD, 8, false, false, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n- {0xdf, 7, 0, X86_DECODE_CMD_FST, 8, false, true, decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+struct decode_x87_tbl invl_inst_x87 = {0x0, 0, 0, 0, 0, false, false, NULL,\n+ NULL, decode_invalid, 0};\n+\n+struct decode_x87_tbl _x87_inst[] = {\n+ {0xd8, 0, 3, X86_DECODE_CMD_FADD, 10, false, false,\n+ decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xd8, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+ {0xd8, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false, decode_x87_modrm_st0,\n+ decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xd8, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+ {0xd8, 4, 3, X86_DECODE_CMD_FSUB, 10, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xd8, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+ {0xd8, 5, 3, X86_DECODE_CMD_FSUB, 10, true, 
false, decode_x87_modrm_st0,\n+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xd8, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+ {0xd8, 6, 3, X86_DECODE_CMD_FDIV, 10, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xd8, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+ {0xd8, 7, 3, X86_DECODE_CMD_FDIV, 10, true, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xd8, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xd9, 0, 3, X86_DECODE_CMD_FLD, 10, false, false,\n+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xd9, 0, 0, X86_DECODE_CMD_FLD, 4, false, false,\n+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xd9, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xd9, 1, 0, X86_DECODE_CMD_INVL, 10, false, false,\n+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xd9, 2, 3, X86_DECODE_CMD_INVL, 10, false, false,\n+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xd9, 2, 0, X86_DECODE_CMD_FST, 4, false, false,\n+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xd9, 3, 3, X86_DECODE_CMD_INVL, 10, false, false,\n+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xd9, 3, 0, X86_DECODE_CMD_FST, 4, false, true,\n+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xd9, 4, 3, X86_DECODE_CMD_INVL, 10, false, false,\n+ decode_x87_modrm_st0, NULL, decode_d9_4, RFLAGS_MASK_NONE},\n+ {0xd9, 4, 0, X86_DECODE_CMD_INVL, 4, false, false,\n+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xd9, 5, 3, X86_DECODE_CMD_FLDxx, 10, false, false, NULL, NULL, NULL,\n+ RFLAGS_MASK_NONE},\n+ {0xd9, 5, 0, X86_DECODE_CMD_FLDCW, 2, false, false,\n+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xd9, 7, 3, X86_DECODE_CMD_FNSTCW, 2, false, false,\n+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xd9, 7, 0, X86_DECODE_CMD_FNSTCW, 2, false, false,\n+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xda, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xda, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+ {0xda, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,\n+ decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xda, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+ {0xda, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xda, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xda, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,\n+ RFLAGS_MASK_NONE},\n+ {0xda, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+ {0xda, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, decode_x87_modrm_st0,\n+ decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xda, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_intp, NULL, 
RFLAGS_MASK_NONE},\n+ {0xda, 6, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,\n+ RFLAGS_MASK_NONE},\n+ {0xda, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+ {0xda, 7, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,\n+ RFLAGS_MASK_NONE},\n+ {0xda, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xdb, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,\n+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdb, 0, 0, X86_DECODE_CMD_FLD, 4, false, false,\n+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdb, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdb, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdb, 2, 0, X86_DECODE_CMD_FST, 4, false, false,\n+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdb, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdb, 3, 0, X86_DECODE_CMD_FST, 4, false, true,\n+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdb, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL,\n+ decode_db_4, RFLAGS_MASK_NONE},\n+ {0xdb, 4, 0, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,\n+ RFLAGS_MASK_NONE},\n+ {0xdb, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdb, 5, 0, X86_DECODE_CMD_FLD, 10, false, false,\n+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdb, 7, 0, X86_DECODE_CMD_FST, 10, false, true,\n+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xdc, 0, 3, X86_DECODE_CMD_FADD, 10, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdc, 0, 0, X86_DECODE_CMD_FADD, 8, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+ {0xdc, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdc, 1, 0, X86_DECODE_CMD_FMUL, 8, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+ {0xdc, 4, 3, X86_DECODE_CMD_FSUB, 10, true, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdc, 4, 0, X86_DECODE_CMD_FSUB, 8, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+ {0xdc, 5, 3, X86_DECODE_CMD_FSUB, 10, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdc, 5, 0, X86_DECODE_CMD_FSUB, 8, true, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+ {0xdc, 6, 3, X86_DECODE_CMD_FDIV, 10, true, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdc, 6, 0, X86_DECODE_CMD_FDIV, 8, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+ {0xdc, 7, 3, X86_DECODE_CMD_FDIV, 10, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdc, 7, 0, X86_DECODE_CMD_FDIV, 8, true, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xdd, 0, 0, X86_DECODE_CMD_FLD, 8, false, false,\n+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdd, 1, 3, X86_DECODE_CMD_FXCH, 10, false, 
false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdd, 2, 3, X86_DECODE_CMD_FST, 10, false, false,\n+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdd, 2, 0, X86_DECODE_CMD_FST, 8, false, false,\n+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdd, 3, 3, X86_DECODE_CMD_FST, 10, false, true,\n+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdd, 3, 0, X86_DECODE_CMD_FST, 8, false, true,\n+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdd, 4, 3, X86_DECODE_CMD_FUCOM, 10, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdd, 4, 0, X86_DECODE_CMD_FRSTOR, 8, false, false,\n+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdd, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdd, 7, 0, X86_DECODE_CMD_FNSTSW, 0, false, false,\n+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdd, 7, 3, X86_DECODE_CMD_FNSTSW, 0, false, false,\n+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xde, 0, 3, X86_DECODE_CMD_FADD, 10, false, true,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xde, 0, 0, X86_DECODE_CMD_FADD, 2, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+ {0xde, 1, 3, X86_DECODE_CMD_FMUL, 10, false, true,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xde, 1, 0, X86_DECODE_CMD_FMUL, 2, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+ {0xde, 4, 3, X86_DECODE_CMD_FSUB, 10, true, true,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xde, 4, 0, X86_DECODE_CMD_FSUB, 2, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+ {0xde, 5, 3, X86_DECODE_CMD_FSUB, 10, false, true,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xde, 5, 0, X86_DECODE_CMD_FSUB, 2, true, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+ {0xde, 6, 3, X86_DECODE_CMD_FDIV, 10, true, true,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xde, 6, 0, X86_DECODE_CMD_FDIV, 2, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+ {0xde, 7, 3, X86_DECODE_CMD_FDIV, 10, false, true,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xde, 7, 0, X86_DECODE_CMD_FDIV, 2, true, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},\n+\n+ {0xdf, 0, 0, X86_DECODE_CMD_FLD, 2, false, false,\n+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdf, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdf, 2, 3, X86_DECODE_CMD_FST, 10, false, true,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdf, 2, 0, X86_DECODE_CMD_FST, 2, false, false,\n+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdf, 3, 3, X86_DECODE_CMD_FST, 10, false, true,\n+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdf, 3, 0, X86_DECODE_CMD_FST, 2, false, true,\n+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdf, 4, 3, X86_DECODE_CMD_FNSTSW, 2, false, true,\n+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdf, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, true,\n+ decode_x87_modrm_st0, 
decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},\n+ {0xdf, 5, 0, X86_DECODE_CMD_FLD, 8, false, false,\n+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n+ {0xdf, 7, 0, X86_DECODE_CMD_FST, 8, false, true,\n+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n };\n \n-void calc_modrm_operand16(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+void calc_modrm_operand16(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n addr_t ptr = 0;\n x86_reg_segment seg = REG_SEG_DS;\n@@ -1163,43 +1644,45 @@ void calc_modrm_operand16(CPUState *cpu, struct x86_decode *decode, struct x86_d\n goto calc_addr;\n }\n \n- if (decode->displacement_size)\n+ if (decode->displacement_size) {\n ptr = sign(decode->displacement, decode->displacement_size);\n+ }\n \n switch (decode->modrm.rm) {\n- case 0:\n- ptr += BX(cpu) + SI(cpu);\n- break;\n- case 1:\n- ptr += BX(cpu) + DI(cpu);\n- break;\n- case 2:\n- ptr += BP(cpu) + SI(cpu);\n- seg = REG_SEG_SS;\n- break;\n- case 3:\n- ptr += BP(cpu) + DI(cpu);\n- seg = REG_SEG_SS;\n- break;\n- case 4:\n- ptr += SI(cpu);\n- break;\n- case 5:\n- ptr += DI(cpu);\n- break;\n- case 6:\n- ptr += BP(cpu);\n- seg = REG_SEG_SS;\n- break;\n- case 7:\n- ptr += BX(cpu);\n- break;\n+ case 0:\n+ ptr += BX(cpu) + SI(cpu);\n+ break;\n+ case 1:\n+ ptr += BX(cpu) + DI(cpu);\n+ break;\n+ case 2:\n+ ptr += BP(cpu) + SI(cpu);\n+ seg = REG_SEG_SS;\n+ break;\n+ case 3:\n+ ptr += BP(cpu) + DI(cpu);\n+ seg = REG_SEG_SS;\n+ break;\n+ case 4:\n+ ptr += SI(cpu);\n+ break;\n+ case 5:\n+ ptr += DI(cpu);\n+ break;\n+ case 6:\n+ ptr += BP(cpu);\n+ seg = REG_SEG_SS;\n+ break;\n+ case 7:\n+ ptr += BX(cpu);\n+ break;\n }\n calc_addr:\n- if (X86_DECODE_CMD_LEA == decode->cmd)\n+ if (X86_DECODE_CMD_LEA == decode->cmd) {\n op->ptr = (uint16_t)ptr;\n- else\n+ } else {\n op->ptr = decode_linear_addr(cpu, decode, (uint16_t)ptr, seg);\n+ }\n }\n \n addr_t get_reg_ref(CPUState *cpu, int reg, int is_extended, int size)\n@@ -1207,24 +1690,25 @@ addr_t get_reg_ref(CPUState *cpu, int reg, int is_extended, int size)\n addr_t ptr = 0;\n int which = 0;\n \n- if (is_extended)\n+ if (is_extended) {\n reg |= REG_R8;\n+ }\n \n \n switch (size) {\n- case 1:\n- if (is_extended || reg < 4) {\n- which = 1;\n- ptr = (addr_t)&RL(cpu, reg);\n- } else {\n- which = 2;\n- ptr = (addr_t)&RH(cpu, reg - 4);\n- }\n- break;\n- default:\n- which = 3;\n- ptr = (addr_t)&RRX(cpu, reg);\n- break;\n+ case 1:\n+ if (is_extended || reg < 4) {\n+ which = 1;\n+ ptr = (addr_t)&RL(cpu, reg);\n+ } else {\n+ which = 2;\n+ ptr = (addr_t)&RH(cpu, reg - 4);\n+ }\n+ break;\n+ default:\n+ which = 3;\n+ ptr = (addr_t)&RRX(cpu, reg);\n+ break;\n }\n return ptr;\n }\n@@ -1232,11 +1716,12 @@ addr_t get_reg_ref(CPUState *cpu, int reg, int is_extended, int size)\n addr_t get_reg_val(CPUState *cpu, int reg, int is_extended, int size)\n {\n addr_t val = 0;\n- memcpy(&val, (void*)get_reg_ref(cpu, reg, is_extended, size), size);\n+ memcpy(&val, (void *)get_reg_ref(cpu, reg, is_extended, size), size);\n return val;\n }\n \n-static addr_t get_sib_val(CPUState *cpu, struct x86_decode *decode, x86_reg_segment *sel)\n+static addr_t get_sib_val(CPUState *cpu, struct x86_decode *decode,\n+ x86_reg_segment *sel)\n {\n addr_t base = 0;\n addr_t scaled_index = 0;\n@@ -1247,52 +1732,61 @@ static addr_t get_sib_val(CPUState *cpu, struct x86_decode *decode, x86_reg_segm\n *sel = REG_SEG_DS;\n \n if (decode->modrm.mod || base_reg != REG_RBP) {\n- if (decode->rex.b)\n+ if (decode->rex.b) {\n base_reg |= REG_R8;\n- if (REG_RSP == 
base_reg || REG_RBP == base_reg)\n+ }\n+ if (REG_RSP == base_reg || REG_RBP == base_reg) {\n *sel = REG_SEG_SS;\n+ }\n base = get_reg_val(cpu, decode->sib.base, decode->rex.b, addr_size);\n }\n \n- if (decode->rex.x)\n+ if (decode->rex.x) {\n index_reg |= REG_R8;\n+ }\n \n- if (index_reg != REG_RSP)\n- scaled_index = get_reg_val(cpu, index_reg, decode->rex.x, addr_size) << decode->sib.scale;\n+ if (index_reg != REG_RSP) {\n+ scaled_index = get_reg_val(cpu, index_reg, decode->rex.x, addr_size) <<\n+ decode->sib.scale;\n+ }\n return base + scaled_index;\n }\n \n-void calc_modrm_operand32(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+void calc_modrm_operand32(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n x86_reg_segment seg = REG_SEG_DS;\n addr_t ptr = 0;\n int addr_size = decode->addressing_size;\n \n- if (decode->displacement_size)\n+ if (decode->displacement_size) {\n ptr = sign(decode->displacement, decode->displacement_size);\n+ }\n \n if (4 == decode->modrm.rm) {\n ptr += get_sib_val(cpu, decode, &seg);\n- }\n- else if (!decode->modrm.mod && 5 == decode->modrm.rm) {\n- if (x86_is_long_mode(cpu))\n+ } else if (!decode->modrm.mod && 5 == decode->modrm.rm) {\n+ if (x86_is_long_mode(cpu)) {\n ptr += RIP(cpu) + decode->len;\n- else\n+ } else {\n ptr = decode->displacement;\n- }\n- else {\n- if (REG_RBP == decode->modrm.rm || REG_RSP == decode->modrm.rm)\n+ }\n+ } else {\n+ if (REG_RBP == decode->modrm.rm || REG_RSP == decode->modrm.rm) {\n seg = REG_SEG_SS;\n+ }\n ptr += get_reg_val(cpu, decode->modrm.rm, decode->rex.b, addr_size);\n }\n \n- if (X86_DECODE_CMD_LEA == decode->cmd)\n+ if (X86_DECODE_CMD_LEA == decode->cmd) {\n op->ptr = (uint32_t)ptr;\n- else\n+ } else {\n op->ptr = decode_linear_addr(cpu, decode, (uint32_t)ptr, seg);\n+ }\n }\n \n-void calc_modrm_operand64(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+void calc_modrm_operand64(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n x86_reg_segment seg = REG_SEG_DS;\n int32_t offset = 0;\n@@ -1300,46 +1794,51 @@ void calc_modrm_operand64(CPUState *cpu, struct x86_decode *decode, struct x86_d\n int rm = decode->modrm.rm;\n addr_t ptr;\n int src = decode->modrm.rm;\n- \n- if (decode->displacement_size)\n+\n+ if (decode->displacement_size) {\n offset = sign(decode->displacement, decode->displacement_size);\n+ }\n \n- if (4 == rm)\n+ if (4 == rm) {\n ptr = get_sib_val(cpu, decode, &seg) + offset;\n- else if (0 == mod && 5 == rm)\n+ } else if (0 == mod && 5 == rm) {\n ptr = RIP(cpu) + decode->len + (int32_t) offset;\n- else\n+ } else {\n ptr = get_reg_val(cpu, src, decode->rex.b, 8) + (int64_t) offset;\n- \n- if (X86_DECODE_CMD_LEA == decode->cmd)\n+ }\n+\n+ if (X86_DECODE_CMD_LEA == decode->cmd) {\n op->ptr = ptr;\n- else\n+ } else {\n op->ptr = decode_linear_addr(cpu, decode, ptr, seg);\n+ }\n }\n \n \n-void calc_modrm_operand(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op)\n+void calc_modrm_operand(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op)\n {\n if (3 == decode->modrm.mod) {\n op->reg = decode->modrm.reg;\n op->type = X86_VAR_REG;\n- op->ptr = get_reg_ref(cpu, decode->modrm.rm, decode->rex.b, decode->operand_size);\n+ op->ptr = get_reg_ref(cpu, decode->modrm.rm, decode->rex.b,\n+ decode->operand_size);\n return;\n }\n \n switch (decode->addressing_size) {\n- case 2:\n- calc_modrm_operand16(cpu, decode, op);\n- break;\n- case 4:\n- calc_modrm_operand32(cpu, decode, op);\n- break;\n- 
case 8:\n- calc_modrm_operand64(cpu, decode, op);\n- break;\n- default:\n- VM_PANIC_EX(\"unsupported address size %d\\n\", decode->addressing_size);\n- break;\n+ case 2:\n+ calc_modrm_operand16(cpu, decode, op);\n+ break;\n+ case 4:\n+ calc_modrm_operand32(cpu, decode, op);\n+ break;\n+ case 8:\n+ calc_modrm_operand64(cpu, decode, op);\n+ break;\n+ default:\n+ VM_PANIC_EX(\"unsupported address size %d\\n\", decode->addressing_size);\n+ break;\n }\n }\n \n@@ -1348,36 +1847,36 @@ static void decode_prefix(CPUState *cpu, struct x86_decode *decode)\n while (1) {\n uint8_t byte = decode_byte(cpu, decode);\n switch (byte) {\n- case PREFIX_LOCK:\n- decode->lock = byte;\n- break;\n- case PREFIX_REPN:\n- case PREFIX_REP:\n- decode->rep = byte;\n- break;\n- case PREFIX_CS_SEG_OVEERIDE:\n- case PREFIX_SS_SEG_OVEERIDE:\n- case PREFIX_DS_SEG_OVEERIDE:\n- case PREFIX_ES_SEG_OVEERIDE:\n- case PREFIX_FS_SEG_OVEERIDE:\n- case PREFIX_GS_SEG_OVEERIDE:\n- decode->segment_override = byte;\n- break;\n- case PREFIX_OP_SIZE_OVERRIDE:\n- decode->op_size_override = byte;\n- break;\n- case PREFIX_ADDR_SIZE_OVERRIDE:\n- decode->addr_size_override = byte;\n+ case PREFIX_LOCK:\n+ decode->lock = byte;\n+ break;\n+ case PREFIX_REPN:\n+ case PREFIX_REP:\n+ decode->rep = byte;\n+ break;\n+ case PREFIX_CS_SEG_OVEERIDE:\n+ case PREFIX_SS_SEG_OVEERIDE:\n+ case PREFIX_DS_SEG_OVEERIDE:\n+ case PREFIX_ES_SEG_OVEERIDE:\n+ case PREFIX_FS_SEG_OVEERIDE:\n+ case PREFIX_GS_SEG_OVEERIDE:\n+ decode->segment_override = byte;\n+ break;\n+ case PREFIX_OP_SIZE_OVERRIDE:\n+ decode->op_size_override = byte;\n+ break;\n+ case PREFIX_ADDR_SIZE_OVERRIDE:\n+ decode->addr_size_override = byte;\n+ break;\n+ case PREFIX_REX ... (PREFIX_REX + 0xf):\n+ if (x86_is_long_mode(cpu)) {\n+ decode->rex.rex = byte;\n break;\n- case PREFIX_REX ... 
(PREFIX_REX + 0xf):\n- if (x86_is_long_mode(cpu)) {\n- decode->rex.rex = byte;\n- break;\n- }\n- // fall through when not in long mode\n- default:\n- decode->len--;\n- return;\n+ }\n+ /* fall through when not in long mode */\n+ default:\n+ decode->len--;\n+ return;\n }\n }\n }\n@@ -1386,33 +1885,36 @@ void set_addressing_size(CPUState *cpu, struct x86_decode *decode)\n {\n decode->addressing_size = -1;\n if (x86_is_real(cpu) || x86_is_v8086(cpu)) {\n- if (decode->addr_size_override)\n+ if (decode->addr_size_override) {\n decode->addressing_size = 4;\n- else\n+ } else {\n decode->addressing_size = 2;\n- }\n- else if (!x86_is_long_mode(cpu)) {\n- // protected\n+ }\n+ } else if (!x86_is_long_mode(cpu)) {\n+ /* protected */\n struct vmx_segment cs;\n vmx_read_segment_descriptor(cpu, &cs, REG_SEG_CS);\n- // check db\n+ /* check db */\n if ((cs.ar >> 14) & 1) {\n- if (decode->addr_size_override)\n+ if (decode->addr_size_override) {\n decode->addressing_size = 2;\n- else\n+ } else {\n decode->addressing_size = 4;\n+ }\n } else {\n- if (decode->addr_size_override)\n+ if (decode->addr_size_override) {\n decode->addressing_size = 4;\n- else\n+ } else {\n decode->addressing_size = 2;\n+ }\n }\n } else {\n- // long\n- if (decode->addr_size_override)\n+ /* long */\n+ if (decode->addr_size_override) {\n decode->addressing_size = 4;\n- else\n+ } else {\n decode->addressing_size = 8;\n+ }\n }\n }\n \n@@ -1420,99 +1922,98 @@ void set_operand_size(CPUState *cpu, struct x86_decode *decode)\n {\n decode->operand_size = -1;\n if (x86_is_real(cpu) || x86_is_v8086(cpu)) {\n- if (decode->op_size_override)\n+ if (decode->op_size_override) {\n decode->operand_size = 4;\n- else\n+ } else {\n decode->operand_size = 2;\n- }\n- else if (!x86_is_long_mode(cpu)) {\n- // protected\n+ }\n+ } else if (!x86_is_long_mode(cpu)) {\n+ /* protected */\n struct vmx_segment cs;\n vmx_read_segment_descriptor(cpu, &cs, REG_SEG_CS);\n- // check db\n+ /* check db */\n if ((cs.ar >> 14) & 1) {\n- if (decode->op_size_override)\n+ if (decode->op_size_override) {\n decode->operand_size = 2;\n- else\n+ } else{\n decode->operand_size = 4;\n+ }\n } else {\n- if (decode->op_size_override)\n+ if (decode->op_size_override) {\n decode->operand_size = 4;\n- else\n+ } else {\n decode->operand_size = 2;\n+ }\n }\n } else {\n- // long\n- if (decode->op_size_override)\n+ /* long */\n+ if (decode->op_size_override) {\n decode->operand_size = 2;\n- else\n+ } else {\n decode->operand_size = 4;\n+ }\n \n- if (decode->rex.w)\n+ if (decode->rex.w) {\n decode->operand_size = 8;\n+ }\n }\n }\n \n static void decode_sib(CPUState *cpu, struct x86_decode *decode)\n {\n- if ((decode->modrm.mod != 3) && (4 == decode->modrm.rm) && (decode->addressing_size != 2)) {\n+ if ((decode->modrm.mod != 3) && (4 == decode->modrm.rm) &&\n+ (decode->addressing_size != 2)) {\n decode->sib.sib = decode_byte(cpu, decode);\n decode->sib_present = true;\n }\n }\n \n-/* 16 bit modrm\n- * mod R/M\n- * 00\t[BX+SI] [BX+DI] [BP+SI] [BP+DI] [SI] [DI] [disp16]\t[BX]\n- * 01\t[BX+SI+disp8]\t[BX+DI+disp8]\t[BP+SI+disp8]\t[BP+DI+disp8]\t[SI+disp8]\t[DI+disp8]\t[BP+disp8]\t[BX+disp8]\n- * 10\t[BX+SI+disp16]\t[BX+DI+disp16]\t[BP+SI+disp16]\t[BP+DI+disp16]\t[SI+disp16]\t[DI+disp16]\t[BP+disp16]\t[BX+disp16]\n- * 11 - - - - - - - -\n- */\n-int disp16_tbl[4][8] =\n- {{0, 0, 0, 0, 0, 0, 2, 0},\n+/* 16 bit modrm */\n+int disp16_tbl[4][8] = {\n+ {0, 0, 0, 0, 0, 0, 2, 0},\n {1, 1, 1, 1, 1, 1, 1, 1},\n {2, 2, 2, 2, 2, 2, 2, 2},\n- {0, 0, 0, 0, 0, 0, 0, 0}};\n+ {0, 0, 0, 0, 0, 0, 0, 0}\n+};\n \n-/*\n- 
32/64-bit\t modrm\n- Mod\n- 00 [r/m] [r/m] [r/m] [r/m] [SIB] [RIP/EIP1,2+disp32] [r/m] [r/m]\n- 01 [r/m+disp8] [r/m+disp8] [r/m+disp8] [r/m+disp8] [SIB+disp8] [r/m+disp8] [SIB+disp8] [r/m+disp8]\n- 10 [r/m+disp32] [r/m+disp32] [r/m+disp32] [r/m+disp32] [SIB+disp32] [r/m+disp32] [SIB+disp32]\t [r/m+disp32]\n- 11 - - - - - - - -\n- */\n-int disp32_tbl[4][8] =\n- {{0, 0, 0, 0, -1, 4, 0, 0},\n+/* 32/64-bit modrm */\n+int disp32_tbl[4][8] = {\n+ {0, 0, 0, 0, -1, 4, 0, 0},\n {1, 1, 1, 1, 1, 1, 1, 1},\n {4, 4, 4, 4, 4, 4, 4, 4},\n- {0, 0, 0, 0, 0, 0, 0, 0}};\n+ {0, 0, 0, 0, 0, 0, 0, 0}\n+};\n \n static inline void decode_displacement(CPUState *cpu, struct x86_decode *decode)\n {\n int addressing_size = decode->addressing_size;\n int mod = decode->modrm.mod;\n int rm = decode->modrm.rm;\n- \n+\n decode->displacement_size = 0;\n switch (addressing_size) {\n- case 2:\n- decode->displacement_size = disp16_tbl[mod][rm];\n- if (decode->displacement_size)\n- decode->displacement = (uint16_t)decode_bytes(cpu, decode, decode->displacement_size);\n- break;\n- case 4:\n- case 8:\n- if (-1 == disp32_tbl[mod][rm]) {\n- if (5 == decode->sib.base)\n- decode->displacement_size = 4;\n+ case 2:\n+ decode->displacement_size = disp16_tbl[mod][rm];\n+ if (decode->displacement_size) {\n+ decode->displacement = (uint16_t)decode_bytes(cpu, decode,\n+ decode->displacement_size);\n+ }\n+ break;\n+ case 4:\n+ case 8:\n+ if (-1 == disp32_tbl[mod][rm]) {\n+ if (5 == decode->sib.base) {\n+ decode->displacement_size = 4;\n }\n- else\n- decode->displacement_size = disp32_tbl[mod][rm];\n- \n- if (decode->displacement_size)\n- decode->displacement = (uint32_t)decode_bytes(cpu, decode, decode->displacement_size);\n- break;\n+ } else {\n+ decode->displacement_size = disp32_tbl[mod][rm];\n+ }\n+\n+ if (decode->displacement_size) {\n+ decode->displacement = (uint32_t)decode_bytes(cpu, decode,\n+ decode->displacement_size);\n+ }\n+ break;\n }\n }\n \n@@ -1520,40 +2021,52 @@ static inline void decode_modrm(CPUState *cpu, struct x86_decode *decode)\n {\n decode->modrm.modrm = decode_byte(cpu, decode);\n decode->is_modrm = true;\n- \n+\n decode_sib(cpu, decode);\n decode_displacement(cpu, decode);\n }\n \n-static inline void decode_opcode_general(CPUState *cpu, struct x86_decode *decode, uint8_t opcode, struct decode_tbl *inst_decoder)\n+static inline void decode_opcode_general(CPUState *cpu,\n+ struct x86_decode *decode,\n+ uint8_t opcode,\n+ struct decode_tbl *inst_decoder)\n {\n decode->cmd = inst_decoder->cmd;\n- if (inst_decoder->operand_size)\n+ if (inst_decoder->operand_size) {\n decode->operand_size = inst_decoder->operand_size;\n+ }\n decode->flags_mask = inst_decoder->flags_mask;\n- \n- if (inst_decoder->is_modrm)\n+\n+ if (inst_decoder->is_modrm) {\n decode_modrm(cpu, decode);\n- if (inst_decoder->decode_op1)\n+ }\n+ if (inst_decoder->decode_op1) {\n inst_decoder->decode_op1(cpu, decode, &decode->op[0]);\n- if (inst_decoder->decode_op2)\n+ }\n+ if (inst_decoder->decode_op2) {\n inst_decoder->decode_op2(cpu, decode, &decode->op[1]);\n- if (inst_decoder->decode_op3)\n+ }\n+ if (inst_decoder->decode_op3) {\n inst_decoder->decode_op3(cpu, decode, &decode->op[2]);\n- if (inst_decoder->decode_op4)\n+ }\n+ if (inst_decoder->decode_op4) {\n inst_decoder->decode_op4(cpu, decode, &decode->op[3]);\n- if (inst_decoder->decode_postfix)\n+ }\n+ if (inst_decoder->decode_postfix) {\n inst_decoder->decode_postfix(cpu, decode);\n+ }\n }\n \n-static inline void decode_opcode_1(CPUState *cpu, struct x86_decode *decode, uint8_t opcode)\n+static 
inline void decode_opcode_1(CPUState *cpu, struct x86_decode *decode,\n+ uint8_t opcode)\n {\n struct decode_tbl *inst_decoder = &_decode_tbl1[opcode];\n decode_opcode_general(cpu, decode, opcode, inst_decoder);\n }\n \n \n-static inline void decode_opcode_2(CPUState *cpu, struct x86_decode *decode, uint8_t opcode)\n+static inline void decode_opcode_2(CPUState *cpu, struct x86_decode *decode,\n+ uint8_t opcode)\n {\n struct decode_tbl *inst_decoder = &_decode_tbl2[opcode];\n decode_opcode_general(cpu, decode, opcode, inst_decoder);\n@@ -1562,7 +2075,7 @@ static inline void decode_opcode_2(CPUState *cpu, struct x86_decode *decode, uin\n static void decode_opcodes(CPUState *cpu, struct x86_decode *decode)\n {\n uint8_t opcode;\n- \n+\n opcode = decode_byte(cpu, decode);\n decode->opcode[decode->opcode_len++] = opcode;\n if (opcode != OPCODE_ESCAPE) {\n@@ -1583,21 +2096,24 @@ uint32_t decode_instruction(CPUState *cpu, struct x86_decode *decode)\n set_operand_size(cpu, decode);\n \n decode_opcodes(cpu, decode);\n- \n+\n return decode->len;\n }\n \n void init_decoder(CPUState *cpu)\n {\n int i;\n- \n- for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++)\n+\n+ for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++) {\n memcpy(_decode_tbl1, &invl_inst, sizeof(invl_inst));\n- for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++)\n+ }\n+ for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++) {\n memcpy(_decode_tbl2, &invl_inst, sizeof(invl_inst));\n- for (i = 0; i < ARRAY_SIZE(_decode_tbl3); i++)\n+ }\n+ for (i = 0; i < ARRAY_SIZE(_decode_tbl3); i++) {\n memcpy(_decode_tbl3, &invl_inst, sizeof(invl_inst_x87));\n- \n+ }\n+\n for (i = 0; i < ARRAY_SIZE(_1op_inst); i++) {\n _decode_tbl1[_1op_inst[i].opcode] = _1op_inst[i];\n }\n@@ -1605,7 +2121,9 @@ void init_decoder(CPUState *cpu)\n _decode_tbl2[_2op_inst[i].opcode] = _2op_inst[i];\n }\n for (i = 0; i < ARRAY_SIZE(_x87_inst); i++) {\n- int index = ((_x87_inst[i].opcode & 0xf) << 4) | ((_x87_inst[i].modrm_mod & 1) << 3) | _x87_inst[i].modrm_reg;\n+ int index = ((_x87_inst[i].opcode & 0xf) << 4) |\n+ ((_x87_inst[i].modrm_mod & 1) << 3) |\n+ _x87_inst[i].modrm_reg;\n _decode_tbl3[index] = _x87_inst[i];\n }\n }\n@@ -1613,47 +2131,55 @@ void init_decoder(CPUState *cpu)\n \n const char *decode_cmd_to_string(enum x86_decode_cmd cmd)\n {\n- static const char *cmds[] = {\"INVL\", \"PUSH\", \"PUSH_SEG\", \"POP\", \"POP_SEG\", \"MOV\", \"MOVSX\", \"MOVZX\", \"CALL_NEAR\",\n- \"CALL_NEAR_ABS_INDIRECT\", \"CALL_FAR_ABS_INDIRECT\", \"CMD_CALL_FAR\", \"RET_NEAR\", \"RET_FAR\", \"ADD\", \"OR\",\n- \"ADC\", \"SBB\", \"AND\", \"SUB\", \"XOR\", \"CMP\", \"INC\", \"DEC\", \"TST\", \"NOT\", \"NEG\", \"JMP_NEAR\", \"JMP_NEAR_ABS_INDIRECT\",\n- \"JMP_FAR\", \"JMP_FAR_ABS_INDIRECT\", \"LEA\", \"JXX\",\n- \"JCXZ\", \"SETXX\", \"MOV_TO_SEG\", \"MOV_FROM_SEG\", \"CLI\", \"STI\", \"CLD\", \"STD\", \"STC\",\n- \"CLC\", \"OUT\", \"IN\", \"INS\", \"OUTS\", \"LIDT\", \"SIDT\", \"LGDT\", \"SGDT\", \"SMSW\", \"LMSW\", \"RDTSCP\", \"INVLPG\", \"MOV_TO_CR\",\n- \"MOV_FROM_CR\", \"MOV_TO_DR\", \"MOV_FROM_DR\", \"PUSHF\", \"POPF\", \"CPUID\", \"ROL\", \"ROR\", \"RCL\", \"RCR\", \"SHL\", \"SAL\",\n- \"SHR\",\"SHRD\", \"SHLD\", \"SAR\", \"DIV\", \"IDIV\", \"MUL\", \"IMUL_3\", \"IMUL_2\", \"IMUL_1\", \"MOVS\", \"CMPS\", \"SCAS\",\n- \"LODS\", \"STOS\", \"BSWAP\", \"XCHG\", \"RDTSC\", \"RDMSR\", \"WRMSR\", \"ENTER\", \"LEAVE\", \"BT\", \"BTS\", \"BTC\", \"BTR\", \"BSF\",\n- \"BSR\", \"IRET\", \"INT\", \"POPA\", \"PUSHA\", \"CWD\", \"CBW\", \"DAS\", \"AAD\", \"AAM\", \"AAS\", \"LOOP\", \"SLDT\", \"STR\", \"LLDT\",\n- 
\"LTR\", \"VERR\", \"VERW\", \"SAHF\", \"LAHF\", \"WBINVD\", \"LDS\", \"LSS\", \"LES\", \"LGS\", \"LFS\", \"CMC\", \"XLAT\", \"NOP\", \"CMOV\",\n- \"CLTS\", \"XADD\", \"HLT\", \"CMPXCHG8B\", \"CMPXCHG\", \"POPCNT\",\n- \"FNINIT\", \"FLD\", \"FLDxx\", \"FNSTCW\", \"FNSTSW\", \"FNSETPM\", \"FSAVE\", \"FRSTOR\", \"FXSAVE\", \"FXRSTOR\", \"FDIV\", \"FMUL\",\n- \"FSUB\", \"FADD\", \"EMMS\", \"MFENCE\", \"SFENCE\", \"LFENCE\", \"PREFETCH\", \"FST\", \"FABS\", \"FUCOM\", \"FUCOMI\", \"FLDCW\",\n+ static const char *cmds[] = {\"INVL\", \"PUSH\", \"PUSH_SEG\", \"POP\", \"POP_SEG\",\n+ \"MOV\", \"MOVSX\", \"MOVZX\", \"CALL_NEAR\", \"CALL_NEAR_ABS_INDIRECT\",\n+ \"CALL_FAR_ABS_INDIRECT\", \"CMD_CALL_FAR\", \"RET_NEAR\", \"RET_FAR\", \"ADD\",\n+ \"OR\", \"ADC\", \"SBB\", \"AND\", \"SUB\", \"XOR\", \"CMP\", \"INC\", \"DEC\", \"TST\",\n+ \"NOT\", \"NEG\", \"JMP_NEAR\", \"JMP_NEAR_ABS_INDIRECT\", \"JMP_FAR\",\n+ \"JMP_FAR_ABS_INDIRECT\", \"LEA\", \"JXX\", \"JCXZ\", \"SETXX\", \"MOV_TO_SEG\",\n+ \"MOV_FROM_SEG\", \"CLI\", \"STI\", \"CLD\", \"STD\", \"STC\", \"CLC\", \"OUT\", \"IN\",\n+ \"INS\", \"OUTS\", \"LIDT\", \"SIDT\", \"LGDT\", \"SGDT\", \"SMSW\", \"LMSW\",\n+ \"RDTSCP\", \"INVLPG\", \"MOV_TO_CR\", \"MOV_FROM_CR\", \"MOV_TO_DR\",\n+ \"MOV_FROM_DR\", \"PUSHF\", \"POPF\", \"CPUID\", \"ROL\", \"ROR\", \"RCL\", \"RCR\",\n+ \"SHL\", \"SAL\", \"SHR\", \"SHRD\", \"SHLD\", \"SAR\", \"DIV\", \"IDIV\", \"MUL\",\n+ \"IMUL_3\", \"IMUL_2\", \"IMUL_1\", \"MOVS\", \"CMPS\", \"SCAS\", \"LODS\", \"STOS\",\n+ \"BSWAP\", \"XCHG\", \"RDTSC\", \"RDMSR\", \"WRMSR\", \"ENTER\", \"LEAVE\", \"BT\",\n+ \"BTS\", \"BTC\", \"BTR\", \"BSF\", \"BSR\", \"IRET\", \"INT\", \"POPA\", \"PUSHA\",\n+ \"CWD\", \"CBW\", \"DAS\", \"AAD\", \"AAM\", \"AAS\", \"LOOP\", \"SLDT\", \"STR\", \"LLDT\",\n+ \"LTR\", \"VERR\", \"VERW\", \"SAHF\", \"LAHF\", \"WBINVD\", \"LDS\", \"LSS\", \"LES\",\n+ \"LGS\", \"LFS\", \"CMC\", \"XLAT\", \"NOP\", \"CMOV\", \"CLTS\", \"XADD\", \"HLT\",\n+ \"CMPXCHG8B\", \"CMPXCHG\", \"POPCNT\", \"FNINIT\", \"FLD\", \"FLDxx\", \"FNSTCW\",\n+ \"FNSTSW\", \"FNSETPM\", \"FSAVE\", \"FRSTOR\", \"FXSAVE\", \"FXRSTOR\", \"FDIV\",\n+ \"FMUL\", \"FSUB\", \"FADD\", \"EMMS\", \"MFENCE\", \"SFENCE\", \"LFENCE\",\n+ \"PREFETCH\", \"FST\", \"FABS\", \"FUCOM\", \"FUCOMI\", \"FLDCW\",\n \"FXCH\", \"FCHS\", \"FCMOV\", \"FRNDINT\", \"FXAM\", \"LAST\"};\n return cmds[cmd];\n }\n \n-addr_t decode_linear_addr(struct CPUState *cpu, struct x86_decode *decode, addr_t addr, x86_reg_segment seg)\n+addr_t decode_linear_addr(struct CPUState *cpu, struct x86_decode *decode,\n+ addr_t addr, x86_reg_segment seg)\n {\n switch (decode->segment_override) {\n- case PREFIX_CS_SEG_OVEERIDE:\n- seg = REG_SEG_CS;\n- break;\n- case PREFIX_SS_SEG_OVEERIDE:\n- seg = REG_SEG_SS;\n- break;\n- case PREFIX_DS_SEG_OVEERIDE:\n- seg = REG_SEG_DS;\n- break;\n- case PREFIX_ES_SEG_OVEERIDE:\n- seg = REG_SEG_ES;\n- break;\n- case PREFIX_FS_SEG_OVEERIDE:\n- seg = REG_SEG_FS;\n- break;\n- case PREFIX_GS_SEG_OVEERIDE:\n- seg = REG_SEG_GS;\n- break;\n- default:\n- break;\n+ case PREFIX_CS_SEG_OVEERIDE:\n+ seg = REG_SEG_CS;\n+ break;\n+ case PREFIX_SS_SEG_OVEERIDE:\n+ seg = REG_SEG_SS;\n+ break;\n+ case PREFIX_DS_SEG_OVEERIDE:\n+ seg = REG_SEG_DS;\n+ break;\n+ case PREFIX_ES_SEG_OVEERIDE:\n+ seg = REG_SEG_ES;\n+ break;\n+ case PREFIX_FS_SEG_OVEERIDE:\n+ seg = REG_SEG_FS;\n+ break;\n+ case PREFIX_GS_SEG_OVEERIDE:\n+ seg = REG_SEG_GS;\n+ break;\n+ default:\n+ break;\n }\n return linear_addr_size(cpu, addr, decode->addressing_size, seg);\n }\ndiff --git 
a/target/i386/hvf-utils/x86_decode.h b/target/i386/hvf-utils/x86_decode.h\nindex 3a22d7d1a5..b6763e1ba1 100644\n--- a/target/i386/hvf-utils/x86_decode.h\n+++ b/target/i386/hvf-utils/x86_decode.h\n@@ -25,20 +25,20 @@\n #include \"x86.h\"\n \n typedef enum x86_prefix {\n- // group 1\n+ /* group 1 */\n PREFIX_LOCK = 0xf0,\n PREFIX_REPN = 0xf2,\n PREFIX_REP = 0xf3,\n- // group 2\n+ /* group 2 */\n PREFIX_CS_SEG_OVEERIDE = 0x2e,\n PREFIX_SS_SEG_OVEERIDE = 0x36,\n PREFIX_DS_SEG_OVEERIDE = 0x3e,\n PREFIX_ES_SEG_OVEERIDE = 0x26,\n PREFIX_FS_SEG_OVEERIDE = 0x64,\n PREFIX_GS_SEG_OVEERIDE = 0x65,\n- // group 3\n+ /* group 3 */\n PREFIX_OP_SIZE_OVERRIDE = 0x66,\n- // group 4\n+ /* group 4 */\n PREFIX_ADDR_SIZE_OVERRIDE = 0x67,\n \n PREFIX_REX = 0x40,\n@@ -46,7 +46,7 @@ typedef enum x86_prefix {\n \n enum x86_decode_cmd {\n X86_DECODE_CMD_INVL = 0,\n- \n+\n X86_DECODE_CMD_PUSH,\n X86_DECODE_CMD_PUSH_SEG,\n X86_DECODE_CMD_POP,\n@@ -177,7 +177,7 @@ enum x86_decode_cmd {\n X86_DECODE_CMD_CMPXCHG8B,\n X86_DECODE_CMD_CMPXCHG,\n X86_DECODE_CMD_POPCNT,\n- \n+\n X86_DECODE_CMD_FNINIT,\n X86_DECODE_CMD_FLD,\n X86_DECODE_CMD_FLDxx,\n@@ -255,7 +255,7 @@ typedef enum x86_var_type {\n X86_VAR_REG,\n X86_VAR_RM,\n \n- // for floating point computations\n+ /* for floating point computations */\n X87_VAR_REG,\n X87_VAR_FLOATP,\n X87_VAR_INTP,\n@@ -308,7 +308,17 @@ uint32_t decode_instruction(CPUState *cpu, struct x86_decode *decode);\n \n addr_t get_reg_ref(CPUState *cpu, int reg, int is_extended, int size);\n addr_t get_reg_val(CPUState *cpu, int reg, int is_extended, int size);\n-void calc_modrm_operand(CPUState *cpu, struct x86_decode *decode, struct x86_decode_op *op);\n-addr_t decode_linear_addr(struct CPUState *cpu, struct x86_decode *decode, addr_t addr, x86_reg_segment seg);\n+void calc_modrm_operand(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op);\n+addr_t decode_linear_addr(struct CPUState *cpu, struct x86_decode *decode,\n+ addr_t addr, x86_reg_segment seg);\n \n-void init_decoder(CPUState* cpu);\n+void init_decoder(CPUState *cpu);\n+void calc_modrm_operand16(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op);\n+void calc_modrm_operand32(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op);\n+void calc_modrm_operand64(CPUState *cpu, struct x86_decode *decode,\n+ struct x86_decode_op *op);\n+void set_addressing_size(CPUState *cpu, struct x86_decode *decode);\n+void set_operand_size(CPUState *cpu, struct x86_decode *decode);\ndiff --git a/target/i386/hvf-utils/x86_descr.h b/target/i386/hvf-utils/x86_descr.h\nindex 78fb1bc420..f5e247782b 100644\n--- a/target/i386/hvf-utils/x86_descr.h\n+++ b/target/i386/hvf-utils/x86_descr.h\n@@ -27,14 +27,29 @@ typedef struct vmx_segment {\n uint64_t ar;\n } vmx_segment;\n \n-// deal with vmstate descriptors\n-void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, x86_reg_segment seg);\n-void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, x86_reg_segment seg);\n+/* deal with vmstate descriptors */\n+void vmx_read_segment_descriptor(struct CPUState *cpu,\n+ struct vmx_segment *desc, x86_reg_segment seg);\n+void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc,\n+ x86_reg_segment seg);\n \n-x68_segment_selector vmx_read_segment_selector(struct CPUState *cpu, x86_reg_segment seg);\n-void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, x86_reg_segment seg);\n+x68_segment_selector vmx_read_segment_selector(struct CPUState 
*cpu,\n+ x86_reg_segment seg);\n+void vmx_write_segment_selector(struct CPUState *cpu,\n+ x68_segment_selector selector,\n+ x86_reg_segment seg);\n \n uint64_t vmx_read_segment_base(struct CPUState *cpu, x86_reg_segment seg);\n-void vmx_write_segment_base(struct CPUState *cpu, x86_reg_segment seg, uint64_t base);\n+void vmx_write_segment_base(struct CPUState *cpu, x86_reg_segment seg,\n+ uint64_t base);\n \n-void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc);\n+void x86_segment_descriptor_to_vmx(struct CPUState *cpu,\n+ x68_segment_selector selector,\n+ struct x86_segment_descriptor *desc,\n+ struct vmx_segment *vmx_desc);\n+\n+uint32_t vmx_read_segment_limit(CPUState *cpu, x86_reg_segment seg);\n+uint32_t vmx_read_segment_ar(CPUState *cpu, x86_reg_segment seg);\n+void vmx_segment_to_x86_descriptor(struct CPUState *cpu,\n+ struct vmx_segment *vmx_desc,\n+ struct x86_segment_descriptor *desc);\ndiff --git a/target/i386/hvf-utils/x86_emu.c b/target/i386/hvf-utils/x86_emu.c\nindex 8b5efc76f0..dc33cd2576 100644\n--- a/target/i386/hvf-utils/x86_emu.c\n+++ b/target/i386/hvf-utils/x86_emu.c\n@@ -45,8 +45,8 @@\n #include \"vmcs.h\"\n #include \"vmx.h\"\n \n-static void print_debug(struct CPUState *cpu);\n-void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data, int direction, int size, uint32_t count);\n+void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,\n+ int direction, int size, uint32_t count);\n \n #define EXEC_2OP_LOGIC_CMD(cpu, decode, cmd, FLAGS_FUNC, save_res) \\\n { \\\n@@ -57,8 +57,9 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data, int directio\n uint8_t v1 = (uint8_t)decode->op[0].val; \\\n uint8_t v2 = (uint8_t)decode->op[1].val; \\\n uint8_t diff = v1 cmd v2; \\\n- if (save_res) \\\n+ if (save_res) { \\\n write_val_ext(cpu, decode->op[0].ptr, diff, 1); \\\n+ } \\\n FLAGS_FUNC##_8(diff); \\\n break; \\\n } \\\n@@ -67,8 +68,9 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data, int directio\n uint16_t v1 = (uint16_t)decode->op[0].val; \\\n uint16_t v2 = (uint16_t)decode->op[1].val; \\\n uint16_t diff = v1 cmd v2; \\\n- if (save_res) \\\n+ if (save_res) { \\\n write_val_ext(cpu, decode->op[0].ptr, diff, 2); \\\n+ } \\\n FLAGS_FUNC##_16(diff); \\\n break; \\\n } \\\n@@ -77,8 +79,9 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data, int directio\n uint32_t v1 = (uint32_t)decode->op[0].val; \\\n uint32_t v2 = (uint32_t)decode->op[1].val; \\\n uint32_t diff = v1 cmd v2; \\\n- if (save_res) \\\n+ if (save_res) { \\\n write_val_ext(cpu, decode->op[0].ptr, diff, 4); \\\n+ } \\\n FLAGS_FUNC##_32(diff); \\\n break; \\\n } \\\n@@ -97,8 +100,9 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data, int directio\n uint8_t v1 = (uint8_t)decode->op[0].val; \\\n uint8_t v2 = (uint8_t)decode->op[1].val; \\\n uint8_t diff = v1 cmd v2; \\\n- if (save_res) \\\n+ if (save_res) { \\\n write_val_ext(cpu, decode->op[0].ptr, diff, 1); \\\n+ } \\\n FLAGS_FUNC##_8(v1, v2, diff); \\\n break; \\\n } \\\n@@ -107,8 +111,9 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data, int directio\n uint16_t v1 = (uint16_t)decode->op[0].val; \\\n uint16_t v2 = (uint16_t)decode->op[1].val; \\\n uint16_t diff = v1 cmd v2; \\\n- if (save_res) \\\n+ if (save_res) { \\\n write_val_ext(cpu, decode->op[0].ptr, diff, 2); \\\n+ } \\\n FLAGS_FUNC##_16(v1, v2, diff); \\\n break; \\\n } \\\n@@ -117,8 +122,9 @@ void 
hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data, int directio\n uint32_t v1 = (uint32_t)decode->op[0].val; \\\n uint32_t v2 = (uint32_t)decode->op[1].val; \\\n uint32_t diff = v1 cmd v2; \\\n- if (save_res) \\\n+ if (save_res) { \\\n write_val_ext(cpu, decode->op[0].ptr, diff, 4); \\\n+ } \\\n FLAGS_FUNC##_32(v1, v2, diff); \\\n break; \\\n } \\\n@@ -127,63 +133,63 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data, int directio\n } \\\n }\n \n-addr_t read_reg(struct CPUState* cpu, int reg, int size)\n+addr_t read_reg(struct CPUState *cpu, int reg, int size)\n {\n switch (size) {\n- case 1:\n- return cpu->hvf_x86->regs[reg].lx;\n- case 2:\n- return cpu->hvf_x86->regs[reg].rx;\n- case 4:\n- return cpu->hvf_x86->regs[reg].erx;\n- case 8:\n- return cpu->hvf_x86->regs[reg].rrx;\n- default:\n- VM_PANIC_ON(\"read_reg size\");\n+ case 1:\n+ return cpu->hvf_x86->regs[reg].lx;\n+ case 2:\n+ return cpu->hvf_x86->regs[reg].rx;\n+ case 4:\n+ return cpu->hvf_x86->regs[reg].erx;\n+ case 8:\n+ return cpu->hvf_x86->regs[reg].rrx;\n+ default:\n+ VM_PANIC_ON(\"read_reg size\");\n }\n return 0;\n }\n \n-void write_reg(struct CPUState* cpu, int reg, addr_t val, int size)\n+void write_reg(struct CPUState *cpu, int reg, addr_t val, int size)\n {\n switch (size) {\n- case 1:\n- cpu->hvf_x86->regs[reg].lx = val;\n- break;\n- case 2:\n- cpu->hvf_x86->regs[reg].rx = val;\n- break;\n- case 4:\n- cpu->hvf_x86->regs[reg].rrx = (uint32_t)val;\n- break;\n- case 8:\n- cpu->hvf_x86->regs[reg].rrx = val;\n- break;\n- default:\n- VM_PANIC_ON(\"write_reg size\");\n+ case 1:\n+ cpu->hvf_x86->regs[reg].lx = val;\n+ break;\n+ case 2:\n+ cpu->hvf_x86->regs[reg].rx = val;\n+ break;\n+ case 4:\n+ cpu->hvf_x86->regs[reg].rrx = (uint32_t)val;\n+ break;\n+ case 8:\n+ cpu->hvf_x86->regs[reg].rrx = val;\n+ break;\n+ default:\n+ VM_PANIC_ON(\"write_reg size\");\n }\n }\n \n addr_t read_val_from_reg(addr_t reg_ptr, int size)\n {\n addr_t val;\n- \n+\n switch (size) {\n- case 1:\n- val = *(uint8_t*)reg_ptr;\n- break;\n- case 2:\n- val = *(uint16_t*)reg_ptr;\n- break;\n- case 4:\n- val = *(uint32_t*)reg_ptr;\n- break;\n- case 8:\n- val = *(uint64_t*)reg_ptr;\n- break;\n- default:\n- VM_PANIC_ON_EX(1, \"read_val: Unknown size %d\\n\", size);\n- break;\n+ case 1:\n+ val = *(uint8_t *)reg_ptr;\n+ break;\n+ case 2:\n+ val = *(uint16_t *)reg_ptr;\n+ break;\n+ case 4:\n+ val = *(uint32_t *)reg_ptr;\n+ break;\n+ case 8:\n+ val = *(uint64_t *)reg_ptr;\n+ break;\n+ default:\n+ VM_PANIC_ON_EX(1, \"read_val: Unknown size %d\\n\", size);\n+ break;\n }\n return val;\n }\n@@ -191,30 +197,32 @@ addr_t read_val_from_reg(addr_t reg_ptr, int size)\n void write_val_to_reg(addr_t reg_ptr, addr_t val, int size)\n {\n switch (size) {\n- case 1:\n- *(uint8_t*)reg_ptr = val;\n- break;\n- case 2:\n- *(uint16_t*)reg_ptr = val;\n- break;\n- case 4:\n- *(uint64_t*)reg_ptr = (uint32_t)val;\n- break;\n- case 8:\n- *(uint64_t*)reg_ptr = val;\n- break;\n- default:\n- VM_PANIC(\"write_val: Unknown size\\n\");\n- break;\n+ case 1:\n+ *(uint8_t *)reg_ptr = val;\n+ break;\n+ case 2:\n+ *(uint16_t *)reg_ptr = val;\n+ break;\n+ case 4:\n+ *(uint64_t *)reg_ptr = (uint32_t)val;\n+ break;\n+ case 8:\n+ *(uint64_t *)reg_ptr = val;\n+ break;\n+ default:\n+ VM_PANIC(\"write_val: Unknown size\\n\");\n+ break;\n }\n }\n \n-static bool is_host_reg(struct CPUState* cpu, addr_t ptr) {\n+static bool is_host_reg(struct CPUState *cpu, addr_t ptr)\n+{\n return (ptr > (addr_t)cpu && ptr < (addr_t)cpu + sizeof(struct CPUState)) ||\n- (ptr > (addr_t)cpu->hvf_x86 
&& ptr < (addr_t)(cpu->hvf_x86 + sizeof(struct hvf_x86_state)));\n+ (ptr > (addr_t)cpu->hvf_x86 && ptr <\n+ (addr_t)(cpu->hvf_x86 + sizeof(struct hvf_x86_state)));\n }\n \n-void write_val_ext(struct CPUState* cpu, addr_t ptr, addr_t val, int size)\n+void write_val_ext(struct CPUState *cpu, addr_t ptr, addr_t val, int size)\n {\n if (is_host_reg(cpu, ptr)) {\n write_val_to_reg(ptr, val, size);\n@@ -223,68 +231,77 @@ void write_val_ext(struct CPUState* cpu, addr_t ptr, addr_t val, int size)\n vmx_write_mem(cpu, ptr, &val, size);\n }\n \n-uint8_t *read_mmio(struct CPUState* cpu, addr_t ptr, int bytes)\n+uint8_t *read_mmio(struct CPUState *cpu, addr_t ptr, int bytes)\n {\n vmx_read_mem(cpu, cpu->hvf_x86->mmio_buf, ptr, bytes);\n return cpu->hvf_x86->mmio_buf;\n }\n \n-addr_t read_val_ext(struct CPUState* cpu, addr_t ptr, int size)\n+addr_t read_val_ext(struct CPUState *cpu, addr_t ptr, int size)\n {\n addr_t val;\n uint8_t *mmio_ptr;\n- \n+\n if (is_host_reg(cpu, ptr)) {\n return read_val_from_reg(ptr, size);\n }\n- \n+\n mmio_ptr = read_mmio(cpu, ptr, size);\n switch (size) {\n- case 1:\n- val = *(uint8_t*)mmio_ptr;\n- break;\n- case 2:\n- val = *(uint16_t*)mmio_ptr;\n- break;\n- case 4:\n- val = *(uint32_t*)mmio_ptr;\n- break;\n- case 8:\n- val = *(uint64_t*)mmio_ptr;\n- break;\n- default:\n- VM_PANIC(\"bad size\\n\");\n- break;\n+ case 1:\n+ val = *(uint8_t *)mmio_ptr;\n+ break;\n+ case 2:\n+ val = *(uint16_t *)mmio_ptr;\n+ break;\n+ case 4:\n+ val = *(uint32_t *)mmio_ptr;\n+ break;\n+ case 8:\n+ val = *(uint64_t *)mmio_ptr;\n+ break;\n+ default:\n+ VM_PANIC(\"bad size\\n\");\n+ break;\n }\n return val;\n }\n \n-static void fetch_operands(struct CPUState *cpu, struct x86_decode *decode, int n, bool val_op0, bool val_op1, bool val_op2)\n+static void fetch_operands(struct CPUState *cpu, struct x86_decode *decode,\n+ int n, bool val_op0, bool val_op1, bool val_op2)\n {\n int i;\n bool calc_val[3] = {val_op0, val_op1, val_op2};\n \n for (i = 0; i < n; i++) {\n switch (decode->op[i].type) {\n- case X86_VAR_IMMEDIATE:\n- break;\n- case X86_VAR_REG:\n- VM_PANIC_ON(!decode->op[i].ptr);\n- if (calc_val[i])\n- decode->op[i].val = read_val_from_reg(decode->op[i].ptr, decode->operand_size);\n- break;\n- case X86_VAR_RM:\n- calc_modrm_operand(cpu, decode, &decode->op[i]);\n- if (calc_val[i])\n- decode->op[i].val = read_val_ext(cpu, decode->op[i].ptr, decode->operand_size);\n- break;\n- case X86_VAR_OFFSET:\n- decode->op[i].ptr = decode_linear_addr(cpu, decode, decode->op[i].ptr, REG_SEG_DS);\n- if (calc_val[i])\n- decode->op[i].val = read_val_ext(cpu, decode->op[i].ptr, decode->operand_size);\n- break;\n- default:\n- break;\n+ case X86_VAR_IMMEDIATE:\n+ break;\n+ case X86_VAR_REG:\n+ VM_PANIC_ON(!decode->op[i].ptr);\n+ if (calc_val[i]) {\n+ decode->op[i].val = read_val_from_reg(decode->op[i].ptr,\n+ decode->operand_size);\n+ }\n+ break;\n+ case X86_VAR_RM:\n+ calc_modrm_operand(cpu, decode, &decode->op[i]);\n+ if (calc_val[i]) {\n+ decode->op[i].val = read_val_ext(cpu, decode->op[i].ptr,\n+ decode->operand_size);\n+ }\n+ break;\n+ case X86_VAR_OFFSET:\n+ decode->op[i].ptr = decode_linear_addr(cpu, decode,\n+ decode->op[i].ptr,\n+ REG_SEG_DS);\n+ if (calc_val[i]) {\n+ decode->op[i].val = read_val_ext(cpu, decode->op[i].ptr,\n+ decode->operand_size);\n+ }\n+ break;\n+ default:\n+ break;\n }\n }\n }\n@@ -292,7 +309,8 @@ static void fetch_operands(struct CPUState *cpu, struct x86_decode *decode, int\n static void exec_mov(struct CPUState *cpu, struct x86_decode *decode)\n {\n fetch_operands(cpu, decode, 2, 
false, true, false);\n- write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val, decode->operand_size);\n+ write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val,\n+ decode->operand_size);\n \n RIP(cpu) += decode->len;\n }\n@@ -341,7 +359,7 @@ static void exec_xor(struct CPUState *cpu, struct x86_decode *decode)\n \n static void exec_neg(struct CPUState *cpu, struct x86_decode *decode)\n {\n- //EXEC_2OP_ARITH_CMD(cpu, decode, -, SET_FLAGS_OSZAPC_SUB, false);\n+ /*EXEC_2OP_ARITH_CMD(cpu, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/\n int32_t val;\n fetch_operands(cpu, decode, 2, true, true, false);\n \n@@ -350,17 +368,15 @@ static void exec_neg(struct CPUState *cpu, struct x86_decode *decode)\n \n if (4 == decode->operand_size) {\n SET_FLAGS_OSZAPC_SUB_32(0, 0 - val, val);\n- }\n- else if (2 == decode->operand_size) {\n+ } else if (2 == decode->operand_size) {\n SET_FLAGS_OSZAPC_SUB_16(0, 0 - val, val);\n- }\n- else if (1 == decode->operand_size) {\n+ } else if (1 == decode->operand_size) {\n SET_FLAGS_OSZAPC_SUB_8(0, 0 - val, val);\n } else {\n VM_PANIC(\"bad op size\\n\");\n }\n \n- //lflags_to_rflags(cpu);\n+ /*lflags_to_rflags(cpu);*/\n RIP(cpu) += decode->len;\n }\n \n@@ -399,7 +415,8 @@ static void exec_not(struct CPUState *cpu, struct x86_decode *decode)\n {\n fetch_operands(cpu, decode, 1, true, false, false);\n \n- write_val_ext(cpu, decode->op[0].ptr, ~decode->op[0].val, decode->operand_size);\n+ write_val_ext(cpu, decode->op[0].ptr, ~decode->op[0].val,\n+ decode->operand_size);\n RIP(cpu) += decode->len;\n }\n \n@@ -410,10 +427,11 @@ void exec_movzx(struct CPUState *cpu, struct x86_decode *decode)\n \n fetch_operands(cpu, decode, 1, false, false, false);\n \n- if (0xb6 == decode->opcode[1])\n+ if (0xb6 == decode->opcode[1]) {\n src_op_size = 1;\n- else\n+ } else {\n src_op_size = 2;\n+ }\n decode->operand_size = src_op_size;\n calc_modrm_operand(cpu, decode, &decode->op[1]);\n decode->op[1].val = read_val_ext(cpu, decode->op[1].ptr, src_op_size);\n@@ -425,21 +443,22 @@ void exec_movzx(struct CPUState *cpu, struct x86_decode *decode)\n static void exec_out(struct CPUState *cpu, struct x86_decode *decode)\n {\n switch (decode->opcode[0]) {\n- case 0xe6:\n- hvf_handle_io(cpu, decode->op[0].val, &AL(cpu), 1, 1, 1);\n- break;\n- case 0xe7:\n- hvf_handle_io(cpu, decode->op[0].val, &RAX(cpu), 1, decode->operand_size, 1);\n- break;\n- case 0xee:\n- hvf_handle_io(cpu, DX(cpu), &AL(cpu), 1, 1, 1);\n- break;\n- case 0xef:\n- hvf_handle_io(cpu, DX(cpu), &RAX(cpu), 1, decode->operand_size, 1);\n- break;\n- default:\n- VM_PANIC(\"Bad out opcode\\n\");\n- break;\n+ case 0xe6:\n+ hvf_handle_io(cpu, decode->op[0].val, &AL(cpu), 1, 1, 1);\n+ break;\n+ case 0xe7:\n+ hvf_handle_io(cpu, decode->op[0].val, &RAX(cpu), 1,\n+ decode->operand_size, 1);\n+ break;\n+ case 0xee:\n+ hvf_handle_io(cpu, DX(cpu), &AL(cpu), 1, 1, 1);\n+ break;\n+ case 0xef:\n+ hvf_handle_io(cpu, DX(cpu), &RAX(cpu), 1, decode->operand_size, 1);\n+ break;\n+ default:\n+ VM_PANIC(\"Bad out opcode\\n\");\n+ break;\n }\n RIP(cpu) += decode->len;\n }\n@@ -448,63 +467,73 @@ static void exec_in(struct CPUState *cpu, struct x86_decode *decode)\n {\n addr_t val = 0;\n switch (decode->opcode[0]) {\n- case 0xe4:\n- hvf_handle_io(cpu, decode->op[0].val, &AL(cpu), 0, 1, 1);\n- break;\n- case 0xe5:\n- hvf_handle_io(cpu, decode->op[0].val, &val, 0, decode->operand_size, 1);\n- if (decode->operand_size == 2)\n- AX(cpu) = val;\n- else\n- RAX(cpu) = (uint32_t)val;\n- break;\n- case 0xec:\n- hvf_handle_io(cpu, DX(cpu), &AL(cpu), 0, 1, 1);\n- 
break;\n- case 0xed:\n- hvf_handle_io(cpu, DX(cpu), &val, 0, decode->operand_size, 1);\n- if (decode->operand_size == 2)\n- AX(cpu) = val;\n- else\n- RAX(cpu) = (uint32_t)val;\n+ case 0xe4:\n+ hvf_handle_io(cpu, decode->op[0].val, &AL(cpu), 0, 1, 1);\n+ break;\n+ case 0xe5:\n+ hvf_handle_io(cpu, decode->op[0].val, &val, 0, decode->operand_size, 1);\n+ if (decode->operand_size == 2) {\n+ AX(cpu) = val;\n+ } else {\n+ RAX(cpu) = (uint32_t)val;\n+ }\n+ break;\n+ case 0xec:\n+ hvf_handle_io(cpu, DX(cpu), &AL(cpu), 0, 1, 1);\n+ break;\n+ case 0xed:\n+ hvf_handle_io(cpu, DX(cpu), &val, 0, decode->operand_size, 1);\n+ if (decode->operand_size == 2) {\n+ AX(cpu) = val;\n+ } else {\n+ RAX(cpu) = (uint32_t)val;\n+ }\n \n- break;\n- default:\n- VM_PANIC(\"Bad in opcode\\n\");\n- break;\n+ break;\n+ default:\n+ VM_PANIC(\"Bad in opcode\\n\");\n+ break;\n }\n \n RIP(cpu) += decode->len;\n }\n \n-static inline void string_increment_reg(struct CPUState * cpu, int reg, struct x86_decode *decode)\n+static inline void string_increment_reg(struct CPUState *cpu, int reg,\n+ struct x86_decode *decode)\n {\n addr_t val = read_reg(cpu, reg, decode->addressing_size);\n- if (cpu->hvf_x86->rflags.df)\n+ if (cpu->hvf_x86->rflags.df) {\n val -= decode->operand_size;\n- else\n+ } else {\n val += decode->operand_size;\n+ }\n write_reg(cpu, reg, val, decode->addressing_size);\n }\n \n-static inline void string_rep(struct CPUState * cpu, struct x86_decode *decode, void (*func)(struct CPUState *cpu, struct x86_decode *ins), int rep)\n+static inline void string_rep(struct CPUState *cpu, struct x86_decode *decode,\n+ void (*func)(struct CPUState *cpu,\n+ struct x86_decode *ins), int rep)\n {\n addr_t rcx = read_reg(cpu, REG_RCX, decode->addressing_size);\n while (rcx--) {\n func(cpu, decode);\n write_reg(cpu, REG_RCX, rcx, decode->addressing_size);\n- if ((PREFIX_REP == rep) && !get_ZF(cpu))\n+ if ((PREFIX_REP == rep) && !get_ZF(cpu)) {\n break;\n- if ((PREFIX_REPN == rep) && get_ZF(cpu))\n+ }\n+ if ((PREFIX_REPN == rep) && get_ZF(cpu)) {\n break;\n+ }\n }\n }\n \n static void exec_ins_single(struct CPUState *cpu, struct x86_decode *decode)\n {\n- addr_t addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size, REG_SEG_ES);\n+ addr_t addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size,\n+ REG_SEG_ES);\n \n- hvf_handle_io(cpu, DX(cpu), cpu->hvf_x86->mmio_buf, 0, decode->operand_size, 1);\n+ hvf_handle_io(cpu, DX(cpu), cpu->hvf_x86->mmio_buf, 0,\n+ decode->operand_size, 1);\n vmx_write_mem(cpu, addr, cpu->hvf_x86->mmio_buf, decode->operand_size);\n \n string_increment_reg(cpu, REG_RDI, decode);\n@@ -512,10 +541,11 @@ static void exec_ins_single(struct CPUState *cpu, struct x86_decode *decode)\n \n static void exec_ins(struct CPUState *cpu, struct x86_decode *decode)\n {\n- if (decode->rep)\n+ if (decode->rep) {\n string_rep(cpu, decode, exec_ins_single, 0);\n- else\n+ } else {\n exec_ins_single(cpu, decode);\n+ }\n \n RIP(cpu) += decode->len;\n }\n@@ -525,18 +555,20 @@ static void exec_outs_single(struct CPUState *cpu, struct x86_decode *decode)\n addr_t addr = decode_linear_addr(cpu, decode, RSI(cpu), REG_SEG_DS);\n \n vmx_read_mem(cpu, cpu->hvf_x86->mmio_buf, addr, decode->operand_size);\n- hvf_handle_io(cpu, DX(cpu), cpu->hvf_x86->mmio_buf, 1, decode->operand_size, 1);\n+ hvf_handle_io(cpu, DX(cpu), cpu->hvf_x86->mmio_buf, 1,\n+ decode->operand_size, 1);\n \n string_increment_reg(cpu, REG_RSI, decode);\n }\n \n static void exec_outs(struct CPUState *cpu, struct x86_decode *decode)\n {\n- if 
(decode->rep)\n+ if (decode->rep) {\n string_rep(cpu, decode, exec_outs_single, 0);\n- else\n+ } else {\n exec_outs_single(cpu, decode);\n- \n+ }\n+\n RIP(cpu) += decode->len;\n }\n \n@@ -545,10 +577,11 @@ static void exec_movs_single(struct CPUState *cpu, struct x86_decode *decode)\n addr_t src_addr;\n addr_t dst_addr;\n addr_t val;\n- \n+\n src_addr = decode_linear_addr(cpu, decode, RSI(cpu), REG_SEG_DS);\n- dst_addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size, REG_SEG_ES);\n- \n+ dst_addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size,\n+ REG_SEG_ES);\n+\n val = read_val_ext(cpu, src_addr, decode->operand_size);\n write_val_ext(cpu, dst_addr, val, decode->operand_size);\n \n@@ -560,9 +593,9 @@ static void exec_movs(struct CPUState *cpu, struct x86_decode *decode)\n {\n if (decode->rep) {\n string_rep(cpu, decode, exec_movs_single, 0);\n- }\n- else\n+ } else {\n exec_movs_single(cpu, decode);\n+ }\n \n RIP(cpu) += decode->len;\n }\n@@ -573,7 +606,8 @@ static void exec_cmps_single(struct CPUState *cpu, struct x86_decode *decode)\n addr_t dst_addr;\n \n src_addr = decode_linear_addr(cpu, decode, RSI(cpu), REG_SEG_DS);\n- dst_addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size, REG_SEG_ES);\n+ dst_addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size,\n+ REG_SEG_ES);\n \n decode->op[0].type = X86_VAR_IMMEDIATE;\n decode->op[0].val = read_val_ext(cpu, src_addr, decode->operand_size);\n@@ -590,9 +624,9 @@ static void exec_cmps(struct CPUState *cpu, struct x86_decode *decode)\n {\n if (decode->rep) {\n string_rep(cpu, decode, exec_cmps_single, decode->rep);\n- }\n- else\n+ } else {\n exec_cmps_single(cpu, decode);\n+ }\n RIP(cpu) += decode->len;\n }\n \n@@ -614,9 +648,9 @@ static void exec_stos(struct CPUState *cpu, struct x86_decode *decode)\n {\n if (decode->rep) {\n string_rep(cpu, decode, exec_stos_single, 0);\n- }\n- else\n+ } else {\n exec_stos_single(cpu, decode);\n+ }\n \n RIP(cpu) += decode->len;\n }\n@@ -624,7 +658,7 @@ static void exec_stos(struct CPUState *cpu, struct x86_decode *decode)\n static void exec_scas_single(struct CPUState *cpu, struct x86_decode *decode)\n {\n addr_t addr;\n- \n+\n addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size, REG_SEG_ES);\n decode->op[1].type = X86_VAR_IMMEDIATE;\n vmx_read_mem(cpu, &decode->op[1].val, addr, decode->operand_size);\n@@ -639,9 +673,9 @@ static void exec_scas(struct CPUState *cpu, struct x86_decode *decode)\n decode->op[0].reg = REG_RAX;\n if (decode->rep) {\n string_rep(cpu, decode, exec_scas_single, decode->rep);\n- }\n- else\n+ } else {\n exec_scas_single(cpu, decode);\n+ }\n \n RIP(cpu) += decode->len;\n }\n@@ -650,7 +684,7 @@ static void exec_lods_single(struct CPUState *cpu, struct x86_decode *decode)\n {\n addr_t addr;\n addr_t val = 0;\n- \n+\n addr = decode_linear_addr(cpu, decode, RSI(cpu), REG_SEG_DS);\n vmx_read_mem(cpu, &val, addr, decode->operand_size);\n write_reg(cpu, REG_RAX, val, decode->operand_size);\n@@ -662,14 +696,14 @@ static void exec_lods(struct CPUState *cpu, struct x86_decode *decode)\n {\n if (decode->rep) {\n string_rep(cpu, decode, exec_lods_single, 0);\n- }\n- else\n+ } else {\n exec_lods_single(cpu, decode);\n+ }\n \n RIP(cpu) += decode->len;\n }\n \n-#define MSR_IA32_UCODE_REV \t\t0x00000017\n+#define MSR_IA32_UCODE_REV 0x00000017\n \n void simulate_rdmsr(struct CPUState *cpu)\n {\n@@ -679,83 +713,83 @@ void simulate_rdmsr(struct CPUState *cpu)\n uint64_t val = 0;\n \n switch (msr) {\n- case MSR_IA32_TSC:\n- val = rdtscp() + rvmcs(cpu->hvf_fd, 
VMCS_TSC_OFFSET);\n- break;\n- case MSR_IA32_APICBASE:\n- val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);\n- break;\n- case MSR_IA32_UCODE_REV:\n- val = (0x100000000ULL << 32) | 0x100000000ULL;\n- break;\n- case MSR_EFER:\n- val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);\n- break;\n- case MSR_FSBASE:\n- val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);\n- break;\n- case MSR_GSBASE:\n- val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);\n- break;\n- case MSR_KERNELGSBASE:\n- val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);\n- break;\n- case MSR_STAR:\n- abort();\n- break;\n- case MSR_LSTAR:\n- abort();\n- break;\n- case MSR_CSTAR:\n- abort();\n- break;\n- case MSR_IA32_MISC_ENABLE:\n- val = env->msr_ia32_misc_enable;\n- break;\n- case MSR_MTRRphysBase(0):\n- case MSR_MTRRphysBase(1):\n- case MSR_MTRRphysBase(2):\n- case MSR_MTRRphysBase(3):\n- case MSR_MTRRphysBase(4):\n- case MSR_MTRRphysBase(5):\n- case MSR_MTRRphysBase(6):\n- case MSR_MTRRphysBase(7):\n- val = env->mtrr_var[(ECX(cpu) - MSR_MTRRphysBase(0)) / 2].base;\n- break;\n- case MSR_MTRRphysMask(0):\n- case MSR_MTRRphysMask(1):\n- case MSR_MTRRphysMask(2):\n- case MSR_MTRRphysMask(3):\n- case MSR_MTRRphysMask(4):\n- case MSR_MTRRphysMask(5):\n- case MSR_MTRRphysMask(6):\n- case MSR_MTRRphysMask(7):\n- val = env->mtrr_var[(ECX(cpu) - MSR_MTRRphysMask(0)) / 2].mask;\n- break;\n- case MSR_MTRRfix64K_00000:\n- val = env->mtrr_fixed[0];\n- break;\n- case MSR_MTRRfix16K_80000:\n- case MSR_MTRRfix16K_A0000:\n- val = env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix16K_80000 + 1];\n- break;\n- case MSR_MTRRfix4K_C0000:\n- case MSR_MTRRfix4K_C8000:\n- case MSR_MTRRfix4K_D0000:\n- case MSR_MTRRfix4K_D8000:\n- case MSR_MTRRfix4K_E0000:\n- case MSR_MTRRfix4K_E8000:\n- case MSR_MTRRfix4K_F0000:\n- case MSR_MTRRfix4K_F8000:\n- val = env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix4K_C0000 + 3];\n- break;\n- case MSR_MTRRdefType:\n- val = env->mtrr_deftype;\n- break;\n- default:\n- // fprintf(stderr, \"%s: unknown msr 0x%x\\n\", __func__, msr);\n- val = 0;\n- break;\n+ case MSR_IA32_TSC:\n+ val = rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET);\n+ break;\n+ case MSR_IA32_APICBASE:\n+ val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);\n+ break;\n+ case MSR_IA32_UCODE_REV:\n+ val = (0x100000000ULL << 32) | 0x100000000ULL;\n+ break;\n+ case MSR_EFER:\n+ val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);\n+ break;\n+ case MSR_FSBASE:\n+ val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);\n+ break;\n+ case MSR_GSBASE:\n+ val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);\n+ break;\n+ case MSR_KERNELGSBASE:\n+ val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);\n+ break;\n+ case MSR_STAR:\n+ abort();\n+ break;\n+ case MSR_LSTAR:\n+ abort();\n+ break;\n+ case MSR_CSTAR:\n+ abort();\n+ break;\n+ case MSR_IA32_MISC_ENABLE:\n+ val = env->msr_ia32_misc_enable;\n+ break;\n+ case MSR_MTRRphysBase(0):\n+ case MSR_MTRRphysBase(1):\n+ case MSR_MTRRphysBase(2):\n+ case MSR_MTRRphysBase(3):\n+ case MSR_MTRRphysBase(4):\n+ case MSR_MTRRphysBase(5):\n+ case MSR_MTRRphysBase(6):\n+ case MSR_MTRRphysBase(7):\n+ val = env->mtrr_var[(ECX(cpu) - MSR_MTRRphysBase(0)) / 2].base;\n+ break;\n+ case MSR_MTRRphysMask(0):\n+ case MSR_MTRRphysMask(1):\n+ case MSR_MTRRphysMask(2):\n+ case MSR_MTRRphysMask(3):\n+ case MSR_MTRRphysMask(4):\n+ case MSR_MTRRphysMask(5):\n+ case MSR_MTRRphysMask(6):\n+ case MSR_MTRRphysMask(7):\n+ val = env->mtrr_var[(ECX(cpu) - MSR_MTRRphysMask(0)) / 2].mask;\n+ break;\n+ case MSR_MTRRfix64K_00000:\n+ val = env->mtrr_fixed[0];\n+ break;\n+ case MSR_MTRRfix16K_80000:\n+ case MSR_MTRRfix16K_A0000:\n+ val 
= env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix16K_80000 + 1];\n+ break;\n+ case MSR_MTRRfix4K_C0000:\n+ case MSR_MTRRfix4K_C8000:\n+ case MSR_MTRRfix4K_D0000:\n+ case MSR_MTRRfix4K_D8000:\n+ case MSR_MTRRfix4K_E0000:\n+ case MSR_MTRRfix4K_E8000:\n+ case MSR_MTRRfix4K_F0000:\n+ case MSR_MTRRfix4K_F8000:\n+ val = env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix4K_C0000 + 3];\n+ break;\n+ case MSR_MTRRdefType:\n+ val = env->mtrr_deftype;\n+ break;\n+ default:\n+ /* fprintf(stderr, \"%s: unknown msr 0x%x\\n\", __func__, msr); */\n+ val = 0;\n+ break;\n }\n \n RAX(cpu) = (uint32_t)val;\n@@ -776,88 +810,89 @@ void simulate_wrmsr(struct CPUState *cpu)\n uint64_t data = ((uint64_t)EDX(cpu) << 32) | EAX(cpu);\n \n switch (msr) {\n- case MSR_IA32_TSC:\n- // if (!osx_is_sierra())\n- // wvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET, data - rdtscp());\n- //hv_vm_sync_tsc(data);\n- break;\n- case MSR_IA32_APICBASE:\n- cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);\n- break;\n- case MSR_FSBASE:\n- wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);\n- break;\n- case MSR_GSBASE:\n- wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);\n- break;\n- case MSR_KERNELGSBASE:\n- wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);\n- break;\n- case MSR_STAR:\n- abort();\n- break;\n- case MSR_LSTAR:\n- abort();\n- break;\n- case MSR_CSTAR:\n- abort();\n- break;\n- case MSR_EFER:\n- cpu->hvf_x86->efer.efer = data;\n- //printf(\"new efer %llx\\n\", EFER(cpu));\n- wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);\n- if (data & EFER_NXE)\n- hv_vcpu_invalidate_tlb(cpu->hvf_fd);\n- break;\n- case MSR_MTRRphysBase(0):\n- case MSR_MTRRphysBase(1):\n- case MSR_MTRRphysBase(2):\n- case MSR_MTRRphysBase(3):\n- case MSR_MTRRphysBase(4):\n- case MSR_MTRRphysBase(5):\n- case MSR_MTRRphysBase(6):\n- case MSR_MTRRphysBase(7):\n- env->mtrr_var[(ECX(cpu) - MSR_MTRRphysBase(0)) / 2].base = data;\n- break;\n- case MSR_MTRRphysMask(0):\n- case MSR_MTRRphysMask(1):\n- case MSR_MTRRphysMask(2):\n- case MSR_MTRRphysMask(3):\n- case MSR_MTRRphysMask(4):\n- case MSR_MTRRphysMask(5):\n- case MSR_MTRRphysMask(6):\n- case MSR_MTRRphysMask(7):\n- env->mtrr_var[(ECX(cpu) - MSR_MTRRphysMask(0)) / 2].mask = data;\n- break;\n- case MSR_MTRRfix64K_00000:\n- env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix64K_00000] = data;\n- break;\n- case MSR_MTRRfix16K_80000:\n- case MSR_MTRRfix16K_A0000:\n- env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix16K_80000 + 1] = data;\n- break;\n- case MSR_MTRRfix4K_C0000:\n- case MSR_MTRRfix4K_C8000:\n- case MSR_MTRRfix4K_D0000:\n- case MSR_MTRRfix4K_D8000:\n- case MSR_MTRRfix4K_E0000:\n- case MSR_MTRRfix4K_E8000:\n- case MSR_MTRRfix4K_F0000:\n- case MSR_MTRRfix4K_F8000:\n- env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix4K_C0000 + 3] = data;\n- break;\n- case MSR_MTRRdefType:\n- env->mtrr_deftype = data;\n- break;\n- default:\n- break;\n+ case MSR_IA32_TSC:\n+ /* if (!osx_is_sierra())\n+ wvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET, data - rdtscp());\n+ hv_vm_sync_tsc(data);*/\n+ break;\n+ case MSR_IA32_APICBASE:\n+ cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);\n+ break;\n+ case MSR_FSBASE:\n+ wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);\n+ break;\n+ case MSR_GSBASE:\n+ wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);\n+ break;\n+ case MSR_KERNELGSBASE:\n+ wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);\n+ break;\n+ case MSR_STAR:\n+ abort();\n+ break;\n+ case MSR_LSTAR:\n+ abort();\n+ break;\n+ case MSR_CSTAR:\n+ abort();\n+ break;\n+ case MSR_EFER:\n+ cpu->hvf_x86->efer.efer = data;\n+ /*printf(\"new efer %llx\\n\", EFER(cpu));*/\n+ wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);\n+ if (data & 
EFER_NXE) {\n+ hv_vcpu_invalidate_tlb(cpu->hvf_fd);\n+ }\n+ break;\n+ case MSR_MTRRphysBase(0):\n+ case MSR_MTRRphysBase(1):\n+ case MSR_MTRRphysBase(2):\n+ case MSR_MTRRphysBase(3):\n+ case MSR_MTRRphysBase(4):\n+ case MSR_MTRRphysBase(5):\n+ case MSR_MTRRphysBase(6):\n+ case MSR_MTRRphysBase(7):\n+ env->mtrr_var[(ECX(cpu) - MSR_MTRRphysBase(0)) / 2].base = data;\n+ break;\n+ case MSR_MTRRphysMask(0):\n+ case MSR_MTRRphysMask(1):\n+ case MSR_MTRRphysMask(2):\n+ case MSR_MTRRphysMask(3):\n+ case MSR_MTRRphysMask(4):\n+ case MSR_MTRRphysMask(5):\n+ case MSR_MTRRphysMask(6):\n+ case MSR_MTRRphysMask(7):\n+ env->mtrr_var[(ECX(cpu) - MSR_MTRRphysMask(0)) / 2].mask = data;\n+ break;\n+ case MSR_MTRRfix64K_00000:\n+ env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix64K_00000] = data;\n+ break;\n+ case MSR_MTRRfix16K_80000:\n+ case MSR_MTRRfix16K_A0000:\n+ env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix16K_80000 + 1] = data;\n+ break;\n+ case MSR_MTRRfix4K_C0000:\n+ case MSR_MTRRfix4K_C8000:\n+ case MSR_MTRRfix4K_D0000:\n+ case MSR_MTRRfix4K_D8000:\n+ case MSR_MTRRfix4K_E0000:\n+ case MSR_MTRRfix4K_E8000:\n+ case MSR_MTRRfix4K_F0000:\n+ case MSR_MTRRfix4K_F8000:\n+ env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix4K_C0000 + 3] = data;\n+ break;\n+ case MSR_MTRRdefType:\n+ env->mtrr_deftype = data;\n+ break;\n+ default:\n+ break;\n }\n \n /* Related to support known hypervisor interface */\n- // if (g_hypervisor_iface)\n- // g_hypervisor_iface->wrmsr_handler(cpu, msr, data);\n+ /* if (g_hypervisor_iface)\n+ g_hypervisor_iface->wrmsr_handler(cpu, msr, data);\n \n- //printf(\"write msr %llx\\n\", RCX(cpu));\n+ printf(\"write msr %llx\\n\", RCX(cpu));*/\n }\n \n static void exec_wrmsr(struct CPUState *cpu, struct x86_decode *decode)\n@@ -893,24 +928,26 @@ static void do_bt(struct CPUState *cpu, struct x86_decode *decode, int flag)\n VM_PANIC(\"bt 64bit\\n\");\n }\n }\n- decode->op[0].val = read_val_ext(cpu, decode->op[0].ptr, decode->operand_size);\n+ decode->op[0].val = read_val_ext(cpu, decode->op[0].ptr,\n+ decode->operand_size);\n cf = (decode->op[0].val >> index) & 0x01;\n \n switch (flag) {\n- case 0:\n- set_CF(cpu, cf);\n- return;\n- case 1:\n- decode->op[0].val ^= (1u << index);\n- break;\n- case 2:\n- decode->op[0].val |= (1u << index);\n- break;\n- case 3:\n- decode->op[0].val &= ~(1u << index);\n- break;\n+ case 0:\n+ set_CF(cpu, cf);\n+ return;\n+ case 1:\n+ decode->op[0].val ^= (1u << index);\n+ break;\n+ case 2:\n+ decode->op[0].val |= (1u << index);\n+ break;\n+ case 3:\n+ decode->op[0].val &= ~(1u << index);\n+ break;\n }\n- write_val_ext(cpu, decode->op[0].ptr, decode->op[0].val, decode->operand_size);\n+ write_val_ext(cpu, decode->op[0].ptr, decode->op[0].val,\n+ decode->operand_size);\n set_CF(cpu, cf);\n }\n \n@@ -946,58 +983,59 @@ void exec_shl(struct CPUState *cpu, struct x86_decode *decode)\n fetch_operands(cpu, decode, 2, true, true, false);\n \n count = decode->op[1].val;\n- count &= 0x1f; // count is masked to 5 bits\n- if (!count)\n+ count &= 0x1f; /* count is masked to 5 bits*/\n+ if (!count) {\n goto exit;\n+ }\n \n switch (decode->operand_size) {\n- case 1:\n- {\n- uint8_t res = 0;\n- if (count <= 8) {\n- res = (decode->op[0].val << count);\n- cf = (decode->op[0].val >> (8 - count)) & 0x1;\n- of = cf ^ (res >> 7);\n- }\n-\n- write_val_ext(cpu, decode->op[0].ptr, res, 1);\n- SET_FLAGS_OSZAPC_LOGIC_8(res);\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n- break;\n+ case 1:\n+ {\n+ uint8_t res = 0;\n+ if (count <= 8) {\n+ res = (decode->op[0].val << count);\n+ cf = (decode->op[0].val >> (8 - count)) & 0x1;\n+ of = cf 
^ (res >> 7);\n }\n- case 2:\n- {\n- uint16_t res = 0;\n-\n- /* from bochs */\n- if (count <= 16) {\n- res = (decode->op[0].val << count);\n- cf = (decode->op[0].val >> (16 - count)) & 0x1;\n- of = cf ^ (res >> 15); // of = cf ^ result15\n- }\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 2);\n- SET_FLAGS_OSZAPC_LOGIC_16(res);\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n- break;\n- }\n- case 4:\n- {\n- uint32_t res = decode->op[0].val << count;\n- \n- write_val_ext(cpu, decode->op[0].ptr, res, 4);\n- SET_FLAGS_OSZAPC_LOGIC_32(res);\n- cf = (decode->op[0].val >> (32 - count)) & 0x1;\n- of = cf ^ (res >> 31); // of = cf ^ result31\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n- break;\n+ write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+ SET_FLAGS_OSZAPC_LOGIC_8(res);\n+ SET_FLAGS_OxxxxC(cpu, of, cf);\n+ break;\n+ }\n+ case 2:\n+ {\n+ uint16_t res = 0;\n+\n+ /* from bochs */\n+ if (count <= 16) {\n+ res = (decode->op[0].val << count);\n+ cf = (decode->op[0].val >> (16 - count)) & 0x1;\n+ of = cf ^ (res >> 15); /* of = cf ^ result15 */\n }\n- default:\n- abort();\n+\n+ write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+ SET_FLAGS_OSZAPC_LOGIC_16(res);\n+ SET_FLAGS_OxxxxC(cpu, of, cf);\n+ break;\n+ }\n+ case 4:\n+ {\n+ uint32_t res = decode->op[0].val << count;\n+\n+ write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+ SET_FLAGS_OSZAPC_LOGIC_32(res);\n+ cf = (decode->op[0].val >> (32 - count)) & 0x1;\n+ of = cf ^ (res >> 31); /* of = cf ^ result31 */\n+ SET_FLAGS_OxxxxC(cpu, of, cf);\n+ break;\n+ }\n+ default:\n+ abort();\n }\n \n exit:\n- //lflags_to_rflags(cpu);\n+ /* lflags_to_rflags(cpu); */\n RIP(cpu) += decode->len;\n }\n \n@@ -1008,14 +1046,16 @@ void exec_movsx(struct CPUState *cpu, struct x86_decode *decode)\n \n fetch_operands(cpu, decode, 2, false, false, false);\n \n- if (0xbe == decode->opcode[1])\n+ if (0xbe == decode->opcode[1]) {\n src_op_size = 1;\n- else\n+ } else {\n src_op_size = 2;\n+ }\n \n decode->operand_size = src_op_size;\n calc_modrm_operand(cpu, decode, &decode->op[1]);\n- decode->op[1].val = sign(read_val_ext(cpu, decode->op[1].ptr, src_op_size), src_op_size);\n+ decode->op[1].val = sign(read_val_ext(cpu, decode->op[1].ptr, src_op_size),\n+ src_op_size);\n \n write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val, op_size);\n \n@@ -1030,68 +1070,71 @@ void exec_ror(struct CPUState *cpu, struct x86_decode *decode)\n count = decode->op[1].val;\n \n switch (decode->operand_size) {\n- case 1:\n- {\n- uint32_t bit6, bit7;\n- uint8_t res;\n-\n- if ((count & 0x07) == 0) {\n- if (count & 0x18) {\n- bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;\n- bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;\n- SET_FLAGS_OxxxxC(cpu, bit6 ^ bit7, bit7);\n- }\n- } else {\n- count &= 0x7; /* use only bottom 3 bits */\n- res = ((uint8_t)decode->op[0].val >> count) | ((uint8_t)decode->op[0].val << (8 - count));\n- write_val_ext(cpu, decode->op[0].ptr, res, 1);\n- bit6 = (res >> 6) & 1;\n- bit7 = (res >> 7) & 1;\n- /* set eflags: ROR count affects the following flags: C, O */\n+ case 1:\n+ {\n+ uint32_t bit6, bit7;\n+ uint8_t res;\n+\n+ if ((count & 0x07) == 0) {\n+ if (count & 0x18) {\n+ bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;\n+ bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;\n SET_FLAGS_OxxxxC(cpu, bit6 ^ bit7, bit7);\n- }\n- break;\n+ }\n+ } else {\n+ count &= 0x7; /* use only bottom 3 bits */\n+ res = ((uint8_t)decode->op[0].val >> count) |\n+ ((uint8_t)decode->op[0].val << (8 - count));\n+ write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+ bit6 = (res >> 6) & 1;\n+ bit7 = (res >> 7) & 1;\n+ /* set eflags: ROR 
count affects the following flags: C, O */\n+ SET_FLAGS_OxxxxC(cpu, bit6 ^ bit7, bit7);\n }\n- case 2:\n- {\n- uint32_t bit14, bit15;\n- uint16_t res;\n-\n- if ((count & 0x0f) == 0) {\n- if (count & 0x10) {\n- bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;\n- bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;\n- // of = result14 ^ result15\n- SET_FLAGS_OxxxxC(cpu, bit14 ^ bit15, bit15);\n- }\n- } else {\n- count &= 0x0f; // use only 4 LSB's\n- res = ((uint16_t)decode->op[0].val >> count) | ((uint16_t)decode->op[0].val << (16 - count));\n- write_val_ext(cpu, decode->op[0].ptr, res, 2);\n-\n- bit14 = (res >> 14) & 1;\n- bit15 = (res >> 15) & 1;\n- // of = result14 ^ result15\n+ break;\n+ }\n+ case 2:\n+ {\n+ uint32_t bit14, bit15;\n+ uint16_t res;\n+\n+ if ((count & 0x0f) == 0) {\n+ if (count & 0x10) {\n+ bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;\n+ bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;\n+ /* of = result14 ^ result15 */\n SET_FLAGS_OxxxxC(cpu, bit14 ^ bit15, bit15);\n }\n- break;\n+ } else {\n+ count &= 0x0f; /* use only 4 LSB's */\n+ res = ((uint16_t)decode->op[0].val >> count) |\n+ ((uint16_t)decode->op[0].val << (16 - count));\n+ write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+\n+ bit14 = (res >> 14) & 1;\n+ bit15 = (res >> 15) & 1;\n+ /* of = result14 ^ result15 */\n+ SET_FLAGS_OxxxxC(cpu, bit14 ^ bit15, bit15);\n }\n- case 4:\n- {\n- uint32_t bit31, bit30;\n- uint32_t res;\n-\n- count &= 0x1f;\n- if (count) {\n- res = ((uint32_t)decode->op[0].val >> count) | ((uint32_t)decode->op[0].val << (32 - count));\n- write_val_ext(cpu, decode->op[0].ptr, res, 4);\n-\n- bit31 = (res >> 31) & 1;\n- bit30 = (res >> 30) & 1;\n- // of = result30 ^ result31\n- SET_FLAGS_OxxxxC(cpu, bit30 ^ bit31, bit31);\n- }\n- break;\n+ break;\n+ }\n+ case 4:\n+ {\n+ uint32_t bit31, bit30;\n+ uint32_t res;\n+\n+ count &= 0x1f;\n+ if (count) {\n+ res = ((uint32_t)decode->op[0].val >> count) |\n+ ((uint32_t)decode->op[0].val << (32 - count));\n+ write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+\n+ bit31 = (res >> 31) & 1;\n+ bit30 = (res >> 30) & 1;\n+ /* of = result30 ^ result31 */\n+ SET_FLAGS_OxxxxC(cpu, bit30 ^ bit31, bit31);\n+ }\n+ break;\n }\n }\n RIP(cpu) += decode->len;\n@@ -1105,71 +1148,74 @@ void exec_rol(struct CPUState *cpu, struct x86_decode *decode)\n count = decode->op[1].val;\n \n switch (decode->operand_size) {\n- case 1:\n- {\n- uint32_t bit0, bit7;\n- uint8_t res;\n-\n- if ((count & 0x07) == 0) {\n- if (count & 0x18) {\n- bit0 = ((uint8_t)decode->op[0].val & 1);\n- bit7 = ((uint8_t)decode->op[0].val >> 7);\n- SET_FLAGS_OxxxxC(cpu, bit0 ^ bit7, bit0);\n- }\n- } else {\n- count &= 0x7; // use only lowest 3 bits\n- res = ((uint8_t)decode->op[0].val << count) | ((uint8_t)decode->op[0].val >> (8 - count));\n-\n- write_val_ext(cpu, decode->op[0].ptr, res, 1);\n- /* set eflags:\n- * ROL count affects the following flags: C, O\n- */\n- bit0 = (res & 1);\n- bit7 = (res >> 7);\n+ case 1:\n+ {\n+ uint32_t bit0, bit7;\n+ uint8_t res;\n+\n+ if ((count & 0x07) == 0) {\n+ if (count & 0x18) {\n+ bit0 = ((uint8_t)decode->op[0].val & 1);\n+ bit7 = ((uint8_t)decode->op[0].val >> 7);\n SET_FLAGS_OxxxxC(cpu, bit0 ^ bit7, bit0);\n }\n- break;\n+ } else {\n+ count &= 0x7; /* use only lowest 3 bits */\n+ res = ((uint8_t)decode->op[0].val << count) |\n+ ((uint8_t)decode->op[0].val >> (8 - count));\n+\n+ write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+ /* set eflags:\n+ * ROL count affects the following flags: C, O\n+ */\n+ bit0 = (res & 1);\n+ bit7 = (res >> 7);\n+ SET_FLAGS_OxxxxC(cpu, bit0 ^ bit7, 
bit0);\n }\n- case 2:\n- {\n- uint32_t bit0, bit15;\n- uint16_t res;\n-\n- if ((count & 0x0f) == 0) {\n- if (count & 0x10) {\n- bit0 = ((uint16_t)decode->op[0].val & 0x1);\n- bit15 = ((uint16_t)decode->op[0].val >> 15);\n- // of = cf ^ result15\n- SET_FLAGS_OxxxxC(cpu, bit0 ^ bit15, bit0);\n- }\n- } else {\n- count &= 0x0f; // only use bottom 4 bits\n- res = ((uint16_t)decode->op[0].val << count) | ((uint16_t)decode->op[0].val >> (16 - count));\n-\n- write_val_ext(cpu, decode->op[0].ptr, res, 2);\n- bit0 = (res & 0x1);\n- bit15 = (res >> 15);\n- // of = cf ^ result15\n+ break;\n+ }\n+ case 2:\n+ {\n+ uint32_t bit0, bit15;\n+ uint16_t res;\n+\n+ if ((count & 0x0f) == 0) {\n+ if (count & 0x10) {\n+ bit0 = ((uint16_t)decode->op[0].val & 0x1);\n+ bit15 = ((uint16_t)decode->op[0].val >> 15);\n+ /* of = cf ^ result15 */\n SET_FLAGS_OxxxxC(cpu, bit0 ^ bit15, bit0);\n }\n- break;\n+ } else {\n+ count &= 0x0f; /* only use bottom 4 bits */\n+ res = ((uint16_t)decode->op[0].val << count) |\n+ ((uint16_t)decode->op[0].val >> (16 - count));\n+\n+ write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+ bit0 = (res & 0x1);\n+ bit15 = (res >> 15);\n+ /* of = cf ^ result15 */\n+ SET_FLAGS_OxxxxC(cpu, bit0 ^ bit15, bit0);\n }\n- case 4:\n- {\n- uint32_t bit0, bit31;\n- uint32_t res;\n-\n- count &= 0x1f;\n- if (count) {\n- res = ((uint32_t)decode->op[0].val << count) | ((uint32_t)decode->op[0].val >> (32 - count));\n-\n- write_val_ext(cpu, decode->op[0].ptr, res, 4);\n- bit0 = (res & 0x1);\n- bit31 = (res >> 31);\n- // of = cf ^ result31\n- SET_FLAGS_OxxxxC(cpu, bit0 ^ bit31, bit0);\n- }\n- break;\n+ break;\n+ }\n+ case 4:\n+ {\n+ uint32_t bit0, bit31;\n+ uint32_t res;\n+\n+ count &= 0x1f;\n+ if (count) {\n+ res = ((uint32_t)decode->op[0].val << count) |\n+ ((uint32_t)decode->op[0].val >> (32 - count));\n+\n+ write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+ bit0 = (res & 0x1);\n+ bit31 = (res >> 31);\n+ /* of = cf ^ result31 */\n+ SET_FLAGS_OxxxxC(cpu, bit0 ^ bit31, bit0);\n+ }\n+ break;\n }\n }\n RIP(cpu) += decode->len;\n@@ -1184,70 +1230,79 @@ void exec_rcl(struct CPUState *cpu, struct x86_decode *decode)\n fetch_operands(cpu, decode, 2, true, true, false);\n count = decode->op[1].val & 0x1f;\n \n- switch(decode->operand_size) {\n- case 1:\n- {\n- uint8_t op1_8 = decode->op[0].val;\n- uint8_t res;\n- count %= 9;\n- if (!count)\n- break;\n+ switch (decode->operand_size) {\n+ case 1:\n+ {\n+ uint8_t op1_8 = decode->op[0].val;\n+ uint8_t res;\n+ count %= 9;\n+ if (!count) {\n+ break;\n+ }\n+\n+ if (1 == count) {\n+ res = (op1_8 << 1) | get_CF(cpu);\n+ } else {\n+ res = (op1_8 << count) | (get_CF(cpu) << (count - 1)) |\n+ (op1_8 >> (9 - count));\n+ }\n \n- if (1 == count)\n- res = (op1_8 << 1) | get_CF(cpu);\n- else\n- res = (op1_8 << count) | (get_CF(cpu) << (count - 1)) | (op1_8 >> (9 - count));\n+ write_val_ext(cpu, decode->op[0].ptr, res, 1);\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+ cf = (op1_8 >> (8 - count)) & 0x01;\n+ of = cf ^ (res >> 7); /* of = cf ^ result7 */\n+ SET_FLAGS_OxxxxC(cpu, of, cf);\n+ break;\n+ }\n+ case 2:\n+ {\n+ uint16_t res;\n+ uint16_t op1_16 = decode->op[0].val;\n \n- cf = (op1_8 >> (8 - count)) & 0x01;\n- of = cf ^ (res >> 7); // of = cf ^ result7\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n- break;\n- }\n- case 2:\n- {\n- uint16_t res;\n- uint16_t op1_16 = decode->op[0].val;\n-\n- count %= 17;\n- if (!count)\n- break;\n-\n- if (1 == count)\n- res = (op1_16 << 1) | get_CF(cpu);\n- else if (count == 16)\n- res = (get_CF(cpu) << 15) | (op1_16 >> 1);\n- else // 2..15\n- res = 
(op1_16 << count) | (get_CF(cpu) << (count - 1)) | (op1_16 >> (17 - count));\n- \n- write_val_ext(cpu, decode->op[0].ptr, res, 2);\n- \n- cf = (op1_16 >> (16 - count)) & 0x1;\n- of = cf ^ (res >> 15); // of = cf ^ result15\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ count %= 17;\n+ if (!count) {\n break;\n }\n- case 4:\n- {\n- uint32_t res;\n- uint32_t op1_32 = decode->op[0].val;\n \n- if (!count)\n- break;\n+ if (1 == count) {\n+ res = (op1_16 << 1) | get_CF(cpu);\n+ } else if (count == 16) {\n+ res = (get_CF(cpu) << 15) | (op1_16 >> 1);\n+ } else { /* 2..15 */\n+ res = (op1_16 << count) | (get_CF(cpu) << (count - 1)) |\n+ (op1_16 >> (17 - count));\n+ }\n \n- if (1 == count)\n- res = (op1_32 << 1) | get_CF(cpu);\n- else\n- res = (op1_32 << count) | (get_CF(cpu) << (count - 1)) | (op1_32 >> (33 - count));\n+ write_val_ext(cpu, decode->op[0].ptr, res, 2);\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+ cf = (op1_16 >> (16 - count)) & 0x1;\n+ of = cf ^ (res >> 15); /* of = cf ^ result15 */\n+ SET_FLAGS_OxxxxC(cpu, of, cf);\n+ break;\n+ }\n+ case 4:\n+ {\n+ uint32_t res;\n+ uint32_t op1_32 = decode->op[0].val;\n \n- cf = (op1_32 >> (32 - count)) & 0x1;\n- of = cf ^ (res >> 31); // of = cf ^ result31\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ if (!count) {\n break;\n }\n+\n+ if (1 == count) {\n+ res = (op1_32 << 1) | get_CF(cpu);\n+ } else {\n+ res = (op1_32 << count) | (get_CF(cpu) << (count - 1)) |\n+ (op1_32 >> (33 - count));\n+ }\n+\n+ write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+\n+ cf = (op1_32 >> (32 - count)) & 0x1;\n+ of = cf ^ (res >> 31); /* of = cf ^ result31 */\n+ SET_FLAGS_OxxxxC(cpu, of, cf);\n+ break;\n+ }\n }\n RIP(cpu) += decode->len;\n }\n@@ -1260,60 +1315,68 @@ void exec_rcr(struct CPUState *cpu, struct x86_decode *decode)\n fetch_operands(cpu, decode, 2, true, true, false);\n count = decode->op[1].val & 0x1f;\n \n- switch(decode->operand_size) {\n- case 1:\n- {\n- uint8_t op1_8 = decode->op[0].val;\n- uint8_t res;\n+ switch (decode->operand_size) {\n+ case 1:\n+ {\n+ uint8_t op1_8 = decode->op[0].val;\n+ uint8_t res;\n \n- count %= 9;\n- if (!count)\n- break;\n- res = (op1_8 >> count) | (get_CF(cpu) << (8 - count)) | (op1_8 << (9 - count));\n+ count %= 9;\n+ if (!count) {\n+ break;\n+ }\n+ res = (op1_8 >> count) | (get_CF(cpu) << (8 - count)) |\n+ (op1_8 << (9 - count));\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+ write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+\n+ cf = (op1_8 >> (count - 1)) & 0x1;\n+ of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */\n+ SET_FLAGS_OxxxxC(cpu, of, cf);\n+ break;\n+ }\n+ case 2:\n+ {\n+ uint16_t op1_16 = decode->op[0].val;\n+ uint16_t res;\n \n- cf = (op1_8 >> (count - 1)) & 0x1;\n- of = (((res << 1) ^ res) >> 7) & 0x1; // of = result6 ^ result7\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ count %= 17;\n+ if (!count) {\n break;\n }\n- case 2:\n- {\n- uint16_t op1_16 = decode->op[0].val;\n- uint16_t res;\n+ res = (op1_16 >> count) | (get_CF(cpu) << (16 - count)) |\n+ (op1_16 << (17 - count));\n \n- count %= 17;\n- if (!count)\n- break;\n- res = (op1_16 >> count) | (get_CF(cpu) << (16 - count)) | (op1_16 << (17 - count));\n+ write_val_ext(cpu, decode->op[0].ptr, res, 2);\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+ cf = (op1_16 >> (count - 1)) & 0x1;\n+ of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^\n+ result14 */\n+ SET_FLAGS_OxxxxC(cpu, of, cf);\n+ break;\n+ }\n+ case 4:\n+ {\n+ uint32_t res;\n+ uint32_t op1_32 = decode->op[0].val;\n \n- cf = (op1_16 >> (count - 1)) & 0x1;\n- of = 
((uint16_t)((res << 1) ^ res) >> 15) & 0x1; // of = result15 ^ result14\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ if (!count) {\n break;\n }\n- case 4:\n- {\n- uint32_t res;\n- uint32_t op1_32 = decode->op[0].val;\n-\n- if (!count)\n- break;\n- \n- if (1 == count)\n- res = (op1_32 >> 1) | (get_CF(cpu) << 31);\n- else\n- res = (op1_32 >> count) | (get_CF(cpu) << (32 - count)) | (op1_32 << (33 - count));\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+ if (1 == count) {\n+ res = (op1_32 >> 1) | (get_CF(cpu) << 31);\n+ } else {\n+ res = (op1_32 >> count) | (get_CF(cpu) << (32 - count)) |\n+ (op1_32 << (33 - count));\n+ }\n \n- cf = (op1_32 >> (count - 1)) & 0x1;\n- of = ((res << 1) ^ res) >> 31; // of = result30 ^ result31\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n- break;\n+ write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+\n+ cf = (op1_32 >> (count - 1)) & 0x1;\n+ of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */\n+ SET_FLAGS_OxxxxC(cpu, of, cf);\n+ break;\n }\n }\n RIP(cpu) += decode->len;\n@@ -1323,8 +1386,10 @@ static void exec_xchg(struct CPUState *cpu, struct x86_decode *decode)\n {\n fetch_operands(cpu, decode, 2, true, true, false);\n \n- write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val, decode->operand_size);\n- write_val_ext(cpu, decode->op[1].ptr, decode->op[0].val, decode->operand_size);\n+ write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val,\n+ decode->operand_size);\n+ write_val_ext(cpu, decode->op[1].ptr, decode->op[0].val,\n+ decode->operand_size);\n \n RIP(cpu) += decode->len;\n }\n@@ -1332,7 +1397,8 @@ static void exec_xchg(struct CPUState *cpu, struct x86_decode *decode)\n static void exec_xadd(struct CPUState *cpu, struct x86_decode *decode)\n {\n EXEC_2OP_ARITH_CMD(cpu, decode, +, SET_FLAGS_OSZAPC_ADD, true);\n- write_val_ext(cpu, decode->op[1].ptr, decode->op[0].val, decode->operand_size);\n+ write_val_ext(cpu, decode->op[1].ptr, decode->op[0].val,\n+ decode->operand_size);\n \n RIP(cpu) += decode->len;\n }\n@@ -1388,13 +1454,9 @@ static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];\n static void init_cmd_handler(CPUState *cpu)\n {\n int i;\n- for (i = 0; i < ARRAY_SIZE(handlers); i++)\n+ for (i = 0; i < ARRAY_SIZE(handlers); i++) {\n _cmd_handler[handlers[i].cmd] = handlers[i];\n-}\n-\n-static void print_debug(struct CPUState *cpu)\n-{\n- printf(\"%llx: eax %llx ebx %llx ecx %llx edx %llx esi %llx edi %llx ebp %llx esp %llx flags %llx\\n\", RIP(cpu), RAX(cpu), RBX(cpu), RCX(cpu), RDX(cpu), RSI(cpu), RDI(cpu), RBP(cpu), RSP(cpu), EFLAGS(cpu));\n+ }\n }\n \n void load_regs(struct CPUState *cpu)\n@@ -1408,14 +1470,13 @@ void load_regs(struct CPUState *cpu)\n RRX(cpu, REG_RDI) = rreg(cpu->hvf_fd, HV_X86_RDI);\n RRX(cpu, REG_RSP) = rreg(cpu->hvf_fd, HV_X86_RSP);\n RRX(cpu, REG_RBP) = rreg(cpu->hvf_fd, HV_X86_RBP);\n- for (i = 8; i < 16; i++)\n+ for (i = 8; i < 16; i++) {\n RRX(cpu, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);\n- \n+ }\n+\n RFLAGS(cpu) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);\n rflags_to_lflags(cpu);\n RIP(cpu) = rreg(cpu->hvf_fd, HV_X86_RIP);\n-\n- //print_debug(cpu);\n }\n \n void store_regs(struct CPUState *cpu)\n@@ -1429,32 +1490,36 @@ void store_regs(struct CPUState *cpu)\n wreg(cpu->hvf_fd, HV_X86_RDI, RDI(cpu));\n wreg(cpu->hvf_fd, HV_X86_RBP, RBP(cpu));\n wreg(cpu->hvf_fd, HV_X86_RSP, RSP(cpu));\n- for (i = 8; i < 16; i++)\n+ for (i = 8; i < 16; i++) {\n wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(cpu, i));\n- \n+ }\n+\n lflags_to_rflags(cpu);\n wreg(cpu->hvf_fd, HV_X86_RFLAGS, RFLAGS(cpu));\n macvm_set_rip(cpu, RIP(cpu));\n-\n- 
//print_debug(cpu);\n }\n \n bool exec_instruction(struct CPUState *cpu, struct x86_decode *ins)\n {\n- //if (hvf_vcpu_id(cpu))\n- //printf(\"%d, %llx: exec_instruction %s\\n\", hvf_vcpu_id(cpu), RIP(cpu), decode_cmd_to_string(ins->cmd));\n- \n+ /*if (hvf_vcpu_id(cpu))\n+ printf(\"%d, %llx: exec_instruction %s\\n\", hvf_vcpu_id(cpu), RIP(cpu),\n+ decode_cmd_to_string(ins->cmd));*/\n+\n if (0 && ins->is_fpu) {\n VM_PANIC(\"emulate fpu\\n\");\n } else {\n if (!_cmd_handler[ins->cmd].handler) {\n- printf(\"Unimplemented handler (%llx) for %d (%x %x) \\n\", RIP(cpu), ins->cmd, ins->opcode[0],\n- ins->opcode_len > 1 ? ins->opcode[1] : 0);\n+ printf(\"Unimplemented handler (%llx) for %d (%x %x) \\n\", RIP(cpu),\n+ ins->cmd, ins->opcode[0],\n+ ins->opcode_len > 1 ? ins->opcode[1] : 0);\n RIP(cpu) += ins->len;\n return true;\n }\n- \n- VM_PANIC_ON_EX(!_cmd_handler[ins->cmd].handler, \"Unimplemented handler (%llx) for %d (%x %x) \\n\", RIP(cpu), ins->cmd, ins->opcode[0], ins->opcode_len > 1 ? ins->opcode[1] : 0);\n+\n+ VM_PANIC_ON_EX(!_cmd_handler[ins->cmd].handler,\n+ \"Unimplemented handler (%llx) for %d (%x %x) \\n\", RIP(cpu),\n+ ins->cmd, ins->opcode[0],\n+ ins->opcode_len > 1 ? ins->opcode[1] : 0);\n _cmd_handler[ins->cmd].handler(cpu, ins);\n }\n return true;\ndiff --git a/target/i386/hvf-utils/x86_emu.h b/target/i386/hvf-utils/x86_emu.h\nindex c56b2798fa..f7a739bb0a 100644\n--- a/target/i386/hvf-utils/x86_emu.h\n+++ b/target/i386/hvf-utils/x86_emu.h\n@@ -13,4 +13,19 @@ void store_regs(struct CPUState *cpu);\n void simulate_rdmsr(struct CPUState *cpu);\n void simulate_wrmsr(struct CPUState *cpu);\n \n+addr_t read_reg(struct CPUState *cpu, int reg, int size);\n+void write_reg(struct CPUState *cpu, int reg, addr_t val, int size);\n+addr_t read_val_from_reg(addr_t reg_ptr, int size);\n+void write_val_to_reg(addr_t reg_ptr, addr_t val, int size);\n+void write_val_ext(struct CPUState *cpu, addr_t ptr, addr_t val, int size);\n+uint8_t *read_mmio(struct CPUState *cpu, addr_t ptr, int bytes);\n+addr_t read_val_ext(struct CPUState *cpu, addr_t ptr, int size);\n+\n+void exec_movzx(struct CPUState *cpu, struct x86_decode *decode);\n+void exec_shl(struct CPUState *cpu, struct x86_decode *decode);\n+void exec_movsx(struct CPUState *cpu, struct x86_decode *decode);\n+void exec_ror(struct CPUState *cpu, struct x86_decode *decode);\n+void exec_rol(struct CPUState *cpu, struct x86_decode *decode);\n+void exec_rcl(struct CPUState *cpu, struct x86_decode *decode);\n+void exec_rcr(struct CPUState *cpu, struct x86_decode *decode);\n #endif\ndiff --git a/target/i386/hvf-utils/x86_flags.c b/target/i386/hvf-utils/x86_flags.c\nindex ca876d03dd..187ab9b56b 100644\n--- a/target/i386/hvf-utils/x86_flags.c\n+++ b/target/i386/hvf-utils/x86_flags.c\n@@ -32,65 +32,78 @@ void SET_FLAGS_OxxxxC(struct CPUState *cpu, uint32_t new_of, uint32_t new_cf)\n {\n uint32_t temp_po = new_of ^ new_cf;\n cpu->hvf_x86->lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF);\n- cpu->hvf_x86->lflags.auxbits |= (temp_po << LF_BIT_PO) | (new_cf << LF_BIT_CF);\n+ cpu->hvf_x86->lflags.auxbits |= (temp_po << LF_BIT_PO) |\n+ (new_cf << LF_BIT_CF);\n }\n \n-void SET_FLAGS_OSZAPC_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff)\n+void SET_FLAGS_OSZAPC_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+ uint32_t diff)\n {\n SET_FLAGS_OSZAPC_SUB_32(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAPC_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff)\n+void SET_FLAGS_OSZAPC_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t 
v2,\n+ uint16_t diff)\n {\n SET_FLAGS_OSZAPC_SUB_16(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAPC_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff)\n+void SET_FLAGS_OSZAPC_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+ uint8_t diff)\n {\n SET_FLAGS_OSZAPC_SUB_8(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAPC_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff)\n+void SET_FLAGS_OSZAPC_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+ uint32_t diff)\n {\n SET_FLAGS_OSZAPC_ADD_32(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAPC_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff)\n+void SET_FLAGS_OSZAPC_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+ uint16_t diff)\n {\n SET_FLAGS_OSZAPC_ADD_16(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAPC_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff)\n+void SET_FLAGS_OSZAPC_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+ uint8_t diff)\n {\n SET_FLAGS_OSZAPC_ADD_8(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAP_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff)\n+void SET_FLAGS_OSZAP_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+ uint32_t diff)\n {\n SET_FLAGS_OSZAP_SUB_32(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAP_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff)\n+void SET_FLAGS_OSZAP_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+ uint16_t diff)\n {\n SET_FLAGS_OSZAP_SUB_16(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAP_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff)\n+void SET_FLAGS_OSZAP_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+ uint8_t diff)\n {\n SET_FLAGS_OSZAP_SUB_8(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAP_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff)\n+void SET_FLAGS_OSZAP_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+ uint32_t diff)\n {\n SET_FLAGS_OSZAP_ADD_32(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAP_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff)\n+void SET_FLAGS_OSZAP_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+ uint16_t diff)\n {\n SET_FLAGS_OSZAP_ADD_16(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAP_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff)\n+void SET_FLAGS_OSZAP_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+ uint8_t diff)\n {\n SET_FLAGS_OSZAP_ADD_8(v1, v2, diff);\n }\n@@ -264,19 +277,22 @@ bool get_ZF(struct CPUState *cpu)\n void set_ZF(struct CPUState *cpu, bool val)\n {\n if (val) {\n- cpu->hvf_x86->lflags.auxbits ^= (((cpu->hvf_x86->lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);\n- // merge the parity bits into the Parity Delta Byte\n+ cpu->hvf_x86->lflags.auxbits ^=\n+ (((cpu->hvf_x86->lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);\n+ /* merge the parity bits into the Parity Delta Byte */\n uint32_t temp_pdb = (255 & cpu->hvf_x86->lflags.result);\n cpu->hvf_x86->lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);\n- // now zero the .result value\n+ /* now zero the .result value */\n cpu->hvf_x86->lflags.result = 0;\n- } else\n+ } else {\n cpu->hvf_x86->lflags.result |= (1 << 8);\n+ }\n }\n \n bool get_SF(struct CPUState *cpu)\n {\n- return ((cpu->hvf_x86->lflags.result >> LF_SIGN_BIT) ^ (cpu->hvf_x86->lflags.auxbits >> LF_BIT_SD)) & 1;\n+ return ((cpu->hvf_x86->lflags.result >> LF_SIGN_BIT) ^\n+ (cpu->hvf_x86->lflags.auxbits >> LF_BIT_SD)) & 1;\n }\n \n void set_SF(struct CPUState *cpu, bool val)\ndiff --git a/target/i386/hvf-utils/x86_flags.h 
b/target/i386/hvf-utils/x86_flags.h\nindex f963f8ad1b..68a0c10b90 100644\n--- a/target/i386/hvf-utils/x86_flags.h\n+++ b/target/i386/hvf-utils/x86_flags.h\n@@ -55,19 +55,24 @@ typedef struct lazy_flags {\n #define GET_ADD_OVERFLOW(op1, op2, result, mask) \\\n ((((op1) ^ (result)) & ((op2) ^ (result))) & (mask))\n \n-// *******************\n-// OSZAPC\n-// *******************\n+/* ******************* */\n+/* OSZAPC */\n+/* ******************* */\n \n /* size, carries, result */\n #define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \\\n addr_t temp = ((lf_carries) & (LF_MASK_AF)) | \\\n (((lf_carries) >> (size - 2)) << LF_BIT_PO); \\\n cpu->hvf_x86->lflags.result = (addr_t)(int##size##_t)(lf_result); \\\n- if ((size) == 32) temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \\\n- else if ((size) == 16) temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \\\n- else if ((size) == 8) temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \\\n- else VM_PANIC(\"unimplemented\"); \\\n+ if ((size) == 32) { \\\n+ temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \\\n+ } else if ((size) == 16) { \\\n+ temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \\\n+ } else if ((size) == 8) { \\\n+ temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \\\n+ } else { \\\n+ VM_PANIC(\"unimplemented\"); \\\n+ } \\\n cpu->hvf_x86->lflags.auxbits = (addr_t)(uint32_t)temp; \\\n }\n \n@@ -87,10 +92,15 @@ typedef struct lazy_flags {\n #define SET_FLAGS_OSZAPC_LOGIC_32(result_32) \\\n SET_FLAGS_OSZAPC_32(0, (result_32))\n #define SET_FLAGS_OSZAPC_LOGIC_SIZE(size, result) { \\\n- if (32 == size) {SET_FLAGS_OSZAPC_LOGIC_32(result);} \\\n- else if (16 == size) {SET_FLAGS_OSZAPC_LOGIC_16(result);} \\\n- else if (8 == size) {SET_FLAGS_OSZAPC_LOGIC_8(result);} \\\n- else VM_PANIC(\"unimplemented\"); \\\n+ if (32 == size) { \\\n+ SET_FLAGS_OSZAPC_LOGIC_32(result); \\\n+ } else if (16 == size) { \\\n+ SET_FLAGS_OSZAPC_LOGIC_16(result); \\\n+ } else if (8 == size) { \\\n+ SET_FLAGS_OSZAPC_LOGIC_8(result); \\\n+ } else { \\\n+ VM_PANIC(\"unimplemented\"); \\\n+ } \\\n }\n \n /* op1, op2, result */\n@@ -109,17 +119,22 @@ typedef struct lazy_flags {\n #define SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32) \\\n SET_FLAGS_OSZAPC_32(SUB_COUT_VEC((op1_32), (op2_32), (diff_32)), (diff_32))\n \n-// *******************\n-// OSZAP\n-// *******************\n+/* ******************* */\n+/* OSZAP */\n+/* ******************* */\n /* size, carries, result */\n #define SET_FLAGS_OSZAP_SIZE(size, lf_carries, lf_result) { \\\n addr_t temp = ((lf_carries) & (LF_MASK_AF)) | \\\n (((lf_carries) >> (size - 2)) << LF_BIT_PO); \\\n- if ((size) == 32) temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \\\n- else if ((size) == 16) temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \\\n- else if ((size) == 8) temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \\\n- else VM_PANIC(\"unimplemented\"); \\\n+ if ((size) == 32) { \\\n+ temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \\\n+ } else if ((size) == 16) { \\\n+ temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \\\n+ } else if ((size) == 8) { \\\n+ temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \\\n+ } else { \\\n+ VM_PANIC(\"unimplemented\"); \\\n+ } \\\n cpu->hvf_x86->lflags.result = (addr_t)(int##size##_t)(lf_result); \\\n addr_t delta_c = (cpu->hvf_x86->lflags.auxbits ^ temp) & LF_MASK_CF; \\\n delta_c ^= (delta_c >> 1); \\\n@@ -150,9 +165,9 @@ typedef struct lazy_flags {\n #define 
SET_FLAGS_OSZAP_SUB_32(op1_32, op2_32, diff_32) \\\n SET_FLAGS_OSZAP_32(SUB_COUT_VEC((op1_32), (op2_32), (diff_32)), (diff_32))\n \n-// *******************\n-// OSZAxC\n-// *******************\n+/* ******************* */\n+/* OSZAxC */\n+/* ******************* */\n /* size, carries, result */\n #define SET_FLAGS_OSZAxC_LOGIC_SIZE(size, lf_result) { \\\n bool saved_PF = getB_PF(); \\\n@@ -183,21 +198,33 @@ void set_OSZAPC(struct CPUState *cpu, uint32_t flags32);\n \n void SET_FLAGS_OxxxxC(struct CPUState *cpu, uint32_t new_of, uint32_t new_cf);\n \n-void SET_FLAGS_OSZAPC_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff);\n-void SET_FLAGS_OSZAPC_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff);\n-void SET_FLAGS_OSZAPC_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff);\n-\n-void SET_FLAGS_OSZAPC_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff);\n-void SET_FLAGS_OSZAPC_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff);\n-void SET_FLAGS_OSZAPC_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff);\n-\n-void SET_FLAGS_OSZAP_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff);\n-void SET_FLAGS_OSZAP_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff);\n-void SET_FLAGS_OSZAP_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff);\n-\n-void SET_FLAGS_OSZAP_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2, uint32_t diff);\n-void SET_FLAGS_OSZAP_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2, uint16_t diff);\n-void SET_FLAGS_OSZAP_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2, uint8_t diff);\n+void SET_FLAGS_OSZAPC_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+ uint32_t diff);\n+void SET_FLAGS_OSZAPC_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+ uint16_t diff);\n+void SET_FLAGS_OSZAPC_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+ uint8_t diff);\n+\n+void SET_FLAGS_OSZAPC_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+ uint32_t diff);\n+void SET_FLAGS_OSZAPC_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+ uint16_t diff);\n+void SET_FLAGS_OSZAPC_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+ uint8_t diff);\n+\n+void SET_FLAGS_OSZAP_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+ uint32_t diff);\n+void SET_FLAGS_OSZAP_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+ uint16_t diff);\n+void SET_FLAGS_OSZAP_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+ uint8_t diff);\n+\n+void SET_FLAGS_OSZAP_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+ uint32_t diff);\n+void SET_FLAGS_OSZAP_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+ uint16_t diff);\n+void SET_FLAGS_OSZAP_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+ uint8_t diff);\n \n void SET_FLAGS_OSZAPC_LOGIC32(struct CPUState *cpu, uint32_t diff);\n void SET_FLAGS_OSZAPC_LOGIC16(struct CPUState *cpu, uint16_t diff);\n@@ -215,4 +242,6 @@ void SET_FLAGS_SHL32(struct CPUState *cpu, uint32_t v, int count, uint32_t res);\n void SET_FLAGS_SHL16(struct CPUState *cpu, uint16_t v, int count, uint16_t res);\n void SET_FLAGS_SHL8(struct CPUState *cpu, uint8_t v, int count, uint8_t res);\n \n+bool _get_OF(struct CPUState *cpu);\n+bool _get_CF(struct CPUState *cpu);\n #endif /* __X86_FLAGS_H__ */\ndiff --git a/target/i386/hvf-utils/x86_mmu.c b/target/i386/hvf-utils/x86_mmu.c\nindex 00fae735be..4c9958ef4c 100644\n--- a/target/i386/hvf-utils/x86_mmu.c\n+++ b/target/i386/hvf-utils/x86_mmu.c\n@@ -54,10 +54,12 @@ 
struct gpt_translation {\n \n static int gpt_top_level(struct CPUState *cpu, bool pae)\n {\n- if (!pae)\n+ if (!pae) {\n return 2;\n- if (x86_is_long_mode(cpu))\n+ }\n+ if (x86_is_long_mode(cpu)) {\n return 4;\n+ }\n \n return 3;\n }\n@@ -74,18 +76,21 @@ static inline int pte_size(bool pae)\n }\n \n \n-static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt, int level, bool pae)\n+static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,\n+ int level, bool pae)\n {\n int index;\n uint64_t pte = 0;\n addr_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;\n addr_t gpa = pt->pte[level] & page_mask;\n \n- if (level == 3 && !x86_is_long_mode(cpu))\n+ if (level == 3 && !x86_is_long_mode(cpu)) {\n gpa = pt->pte[level];\n+ }\n \n index = gpt_entry(pt->gva, level, pae);\n- address_space_rw(&address_space_memory, gpa + index * pte_size(pae), MEMTXATTRS_UNSPECIFIED, (uint8_t *)&pte, pte_size(pae), 0);\n+ address_space_rw(&address_space_memory, gpa + index * pte_size(pae),\n+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)&pte, pte_size(pae), 0);\n \n pt->pte[level - 1] = pte;\n \n@@ -93,32 +98,38 @@ static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt, int l\n }\n \n /* test page table entry */\n-static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt, int level, bool *is_large, bool pae)\n+static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,\n+ int level, bool *is_large, bool pae)\n {\n uint64_t pte = pt->pte[level];\n- \n- if (pt->write_access)\n+\n+ if (pt->write_access) {\n pt->err_code |= MMU_PAGE_WT;\n- if (pt->user_access)\n+ }\n+ if (pt->user_access) {\n pt->err_code |= MMU_PAGE_US;\n- if (pt->exec_access)\n+ }\n+ if (pt->exec_access) {\n pt->err_code |= MMU_PAGE_NX;\n+ }\n \n if (!pte_present(pte)) {\n- addr_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;\n+ /* addr_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK; */\n return false;\n }\n- \n- if (pae && !x86_is_long_mode(cpu) && 2 == level)\n+\n+ if (pae && !x86_is_long_mode(cpu) && 2 == level) {\n goto exit;\n- \n+ }\n+\n if (1 == level && pte_large_page(pte)) {\n pt->err_code |= MMU_PAGE_PT;\n *is_large = true;\n }\n- if (!level)\n+ if (!level) {\n pt->err_code |= MMU_PAGE_PT;\n- \n+ }\n+\n addr_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);\n /* check protection */\n if (cr0 & CR0_WP) {\n@@ -134,7 +145,7 @@ static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt, int\n if (pae && pt->exec_access && !pte_exec_access(pte)) {\n return false;\n }\n- \n+\n exit:\n /* TODO: check reserved bits */\n return true;\n@@ -149,22 +160,24 @@ static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae)\n {\n VM_PANIC_ON(!pte_large_page(pt->pte[1]))\n /* 2Mb large page */\n- if (pae)\n+ if (pae) {\n return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff);\n- \n+ }\n+\n /* 4Mb large page */\n return pse_pte_to_page(pt->pte[1]) | (pt->gva & 0x3fffff);\n }\n \n \n \n-static bool walk_gpt(struct CPUState *cpu, addr_t addr, int err_code, struct gpt_translation* pt, bool pae)\n+static bool walk_gpt(struct CPUState *cpu, addr_t addr, int err_code,\n+ struct gpt_translation *pt, bool pae)\n {\n int top_level, level;\n bool is_large = false;\n addr_t cr3 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3);\n addr_t page_mask = pae ? 
PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;\n- \n+\n memset(pt, 0, sizeof(*pt));\n top_level = gpt_top_level(cpu, pae);\n \n@@ -173,7 +186,7 @@ static bool walk_gpt(struct CPUState *cpu, addr_t addr, int err_code, struct gpt\n pt->user_access = (err_code & MMU_PAGE_US);\n pt->write_access = (err_code & MMU_PAGE_WT);\n pt->exec_access = (err_code & MMU_PAGE_NX);\n- \n+\n for (level = top_level; level > 0; level--) {\n get_pt_entry(cpu, pt, level, pae);\n \n@@ -181,14 +194,16 @@ static bool walk_gpt(struct CPUState *cpu, addr_t addr, int err_code, struct gpt\n return false;\n }\n \n- if (is_large)\n+ if (is_large) {\n break;\n+ }\n }\n \n- if (!is_large)\n+ if (!is_large) {\n pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);\n- else\n+ } else {\n pt->gpa = large_page_gpa(pt, pae);\n+ }\n \n return true;\n }\n@@ -214,18 +229,20 @@ bool mmu_gva_to_gpa(struct CPUState *cpu, addr_t gva, addr_t *gpa)\n return false;\n }\n \n-void vmx_write_mem(struct CPUState* cpu, addr_t gva, void *data, int bytes)\n+void vmx_write_mem(struct CPUState *cpu, addr_t gva, void *data, int bytes)\n {\n addr_t gpa;\n \n while (bytes > 0) {\n- // copy page\n+ /* copy page */\n int copy = MIN(bytes, 0x1000 - (gva & 0xfff));\n \n if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {\n- VM_PANIC_ON_EX(1, \"%s: mmu_gva_to_gpa %llx failed\\n\", __FUNCTION__, gva);\n+ VM_PANIC_ON_EX(1, \"%s: mmu_gva_to_gpa %llx failed\\n\", __func__,\n+ gva);\n } else {\n- address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED, data, copy, 1);\n+ address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,\n+ data, copy, 1);\n }\n \n bytes -= copy;\n@@ -234,18 +251,20 @@ void vmx_write_mem(struct CPUState* cpu, addr_t gva, void *data, int bytes)\n }\n }\n \n-void vmx_read_mem(struct CPUState* cpu, void *data, addr_t gva, int bytes)\n+void vmx_read_mem(struct CPUState *cpu, void *data, addr_t gva, int bytes)\n {\n addr_t gpa;\n \n while (bytes > 0) {\n- // copy page\n+ /* copy page */\n int copy = MIN(bytes, 0x1000 - (gva & 0xfff));\n \n if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {\n- VM_PANIC_ON_EX(1, \"%s: mmu_gva_to_gpa %llx failed\\n\", __FUNCTION__, gva);\n+ VM_PANIC_ON_EX(1, \"%s: mmu_gva_to_gpa %llx failed\\n\", __func__,\n+ gva);\n }\n- address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED, data, copy, 0);\n+ address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,\n+ data, copy, 0);\n \n bytes -= copy;\n gva += copy;\ndiff --git a/target/i386/hvf-utils/x86_mmu.h b/target/i386/hvf-utils/x86_mmu.h\nindex c31bf28982..b5d2d59067 100644\n--- a/target/i386/hvf-utils/x86_mmu.h\n+++ b/target/i386/hvf-utils/x86_mmu.h\n@@ -14,7 +14,7 @@\n #define PT_GLOBAL (1 << 8)\n #define PT_NX (1llu << 63)\n \n-// error codes\n+/* error codes */\n #define MMU_PAGE_PT (1 << 0)\n #define MMU_PAGE_WT (1 << 1)\n #define MMU_PAGE_US (1 << 2)\n@@ -22,7 +22,7 @@\n \n bool mmu_gva_to_gpa(struct CPUState *cpu, addr_t gva, addr_t *gpa);\n \n-void vmx_write_mem(struct CPUState* cpu, addr_t gva, void *data, int bytes);\n-void vmx_read_mem(struct CPUState* cpu, void *data, addr_t gva, int bytes);\n+void vmx_write_mem(struct CPUState *cpu, addr_t gva, void *data, int bytes);\n+void vmx_read_mem(struct CPUState *cpu, void *data, addr_t gva, int bytes);\n \n #endif /* __X86_MMU_H__ */\ndiff --git a/target/i386/hvf-utils/x86hvf.c b/target/i386/hvf-utils/x86hvf.c\nindex 5c1d5ece36..819d760624 100644\n--- a/target/i386/hvf-utils/x86hvf.c\n+++ b/target/i386/hvf-utils/x86hvf.c\n@@ -35,16 +35,16 @@\n #include <Hypervisor/hv_vmx.h>\n #include 
<stdint.h>\n \n-void hvf_cpu_synchronize_state(struct CPUState* cpu_state);\n-\n-void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg, SegmentCache *qseg, bool is_tr)\n+void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,\n+ SegmentCache *qseg, bool is_tr)\n {\n vmx_seg->sel = qseg->selector;\n vmx_seg->base = qseg->base;\n vmx_seg->limit = qseg->limit;\n \n if (!qseg->selector && !x86_is_real(cpu) && !is_tr) {\n- // the TR register is usable after processor reset despite having a null selector\n+ /* the TR register is usable after processor reset despite\n+ * having a null selector */\n vmx_seg->ar = 1 << 16;\n return;\n }\n@@ -87,19 +87,18 @@ void hvf_put_xsave(CPUState *cpu_state)\n }\n }\n \n-void vmx_update_tpr(CPUState *cpu);\n void hvf_put_segments(CPUState *cpu_state)\n {\n CPUX86State *env = &X86_CPU(cpu_state)->env;\n struct vmx_segment seg;\n- \n+\n wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);\n wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);\n \n wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);\n wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);\n \n- //wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]);\n+ /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */\n wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);\n vmx_update_tpr(cpu_state);\n wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);\n@@ -109,7 +108,7 @@ void hvf_put_segments(CPUState *cpu_state)\n \n hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);\n vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_CS);\n- \n+\n hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);\n vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_DS);\n \n@@ -130,17 +129,20 @@ void hvf_put_segments(CPUState *cpu_state)\n \n hvf_set_segment(cpu_state, &seg, &env->ldt, false);\n vmx_write_segment_descriptor(cpu_state, &seg, REG_SEG_LDTR);\n- \n+\n hv_vcpu_flush(cpu_state->hvf_fd);\n }\n- \n+\n void hvf_put_msrs(CPUState *cpu_state)\n {\n CPUX86State *env = &X86_CPU(cpu_state)->env;\n \n- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, env->sysenter_cs);\n- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);\n- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);\n+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,\n+ env->sysenter_cs);\n+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,\n+ env->sysenter_esp);\n+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,\n+ env->sysenter_eip);\n \n hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);\n \n@@ -154,8 +156,8 @@ void hvf_put_msrs(CPUState *cpu_state)\n hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);\n hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);\n \n- // if (!osx_is_sierra())\n- // wvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET, env->tsc - rdtscp());\n+ /* if (!osx_is_sierra())\n+ wvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET, env->tsc - rdtscp());*/\n hv_vm_sync_tsc(env->tsc);\n }\n \n@@ -183,7 +185,7 @@ void hvf_get_segments(CPUState *cpu_state)\n \n vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_CS);\n hvf_get_segment(&env->segs[R_CS], &seg);\n- \n+\n vmx_read_segment_descriptor(cpu_state, &seg, REG_SEG_DS);\n hvf_get_segment(&env->segs[R_DS], &seg);\n \n@@ -214,7 +216,7 @@ void hvf_get_segments(CPUState *cpu_state)\n env->cr[2] = 0;\n env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);\n env->cr[4] = 
rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);\n- \n+\n env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);\n }\n \n@@ -222,10 +224,10 @@ void hvf_get_msrs(CPUState *cpu_state)\n {\n CPUX86State *env = &X86_CPU(cpu_state)->env;\n uint64_t tmp;\n- \n+\n hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);\n env->sysenter_cs = tmp;\n- \n+\n hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);\n env->sysenter_esp = tmp;\n \n@@ -242,7 +244,7 @@ void hvf_get_msrs(CPUState *cpu_state)\n #endif\n \n hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);\n- \n+\n env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);\n }\n \n@@ -269,15 +271,15 @@ int hvf_put_registers(CPUState *cpu_state)\n wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);\n wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);\n wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);\n- \n+\n wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);\n- \n+\n hvf_put_xsave(cpu_state);\n- \n+\n hvf_put_segments(cpu_state);\n- \n+\n hvf_put_msrs(cpu_state);\n- \n+\n wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);\n wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);\n wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);\n@@ -286,7 +288,7 @@ int hvf_put_registers(CPUState *cpu_state)\n wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);\n wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);\n wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);\n- \n+\n return 0;\n }\n \n@@ -312,16 +314,16 @@ int hvf_get_registers(CPUState *cpu_state)\n env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);\n env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);\n env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);\n- \n+\n env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);\n env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);\n- \n+\n hvf_get_xsave(cpu_state);\n env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);\n- \n+\n hvf_get_segments(cpu_state);\n hvf_get_msrs(cpu_state);\n- \n+\n env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);\n env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);\n env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);\n@@ -330,7 +332,7 @@ int hvf_get_registers(CPUState *cpu_state)\n env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);\n env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);\n env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);\n- \n+\n return 0;\n }\n \n@@ -338,14 +340,16 @@ static void vmx_set_int_window_exiting(CPUState *cpu)\n {\n uint64_t val;\n val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);\n- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val | VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);\n+ wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |\n+ VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);\n }\n \n void vmx_clear_int_window_exiting(CPUState *cpu)\n {\n uint64_t val;\n val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);\n- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val & ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);\n+ wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &\n+ ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);\n }\n \n #define NMI_VEC 2\n@@ -353,28 +357,30 @@ void vmx_clear_int_window_exiting(CPUState *cpu)\n void hvf_inject_interrupts(CPUState *cpu_state)\n {\n X86CPU *x86cpu = X86_CPU(cpu_state);\n- int allow_nmi = !(rvmcs(cpu_state->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) & VMCS_INTERRUPTIBILITY_NMI_BLOCKING);\n+ int allow_nmi = !(rvmcs(cpu_state->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &\n+ VMCS_INTERRUPTIBILITY_NMI_BLOCKING);\n \n uint64_t idt_info = rvmcs(cpu_state->hvf_fd, 
VMCS_IDT_VECTORING_INFO);\n uint64_t info = 0;\n- \n+\n if (idt_info & VMCS_IDT_VEC_VALID) {\n uint8_t vector = idt_info & 0xff;\n uint64_t intr_type = idt_info & VMCS_INTR_T_MASK;\n info = idt_info;\n- \n+\n uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);\n if (intr_type == VMCS_INTR_T_NMI && reason != EXIT_REASON_TASK_SWITCH) {\n allow_nmi = 1;\n vmx_clear_nmi_blocking(cpu_state);\n }\n- \n+\n if ((allow_nmi || intr_type != VMCS_INTR_T_NMI)) {\n info &= ~(1 << 12); /* clear undefined bit */\n if (intr_type == VMCS_INTR_T_SWINTR ||\n intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||\n intr_type == VMCS_INTR_T_SWEXCEPTION) {\n- uint64_t ins_len = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n+ uint64_t ins_len = rvmcs(cpu_state->hvf_fd,\n+ VMCS_EXIT_INSTRUCTION_LENGTH);\n wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, ins_len);\n }\n if (vector == EXCEPTION_BP || vector == EXCEPTION_OF) {\n@@ -384,16 +390,17 @@ void hvf_inject_interrupts(CPUState *cpu_state)\n */\n info &= ~VMCS_INTR_T_MASK;\n info |= VMCS_INTR_T_SWEXCEPTION;\n- uint64_t ins_len = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n+ uint64_t ins_len = rvmcs(cpu_state->hvf_fd,\n+ VMCS_EXIT_INSTRUCTION_LENGTH);\n wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, ins_len);\n }\n- \n+\n uint64_t err = 0;\n if (idt_info & VMCS_INTR_DEL_ERRCODE) {\n err = rvmcs(cpu_state->hvf_fd, VMCS_IDT_VECTORING_ERROR);\n wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR, err);\n }\n- //printf(\"reinject %lx err %d\\n\", info, err);\n+ /*printf(\"reinject %lx err %d\\n\", info, err);*/\n wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);\n };\n }\n@@ -408,22 +415,26 @@ void hvf_inject_interrupts(CPUState *cpu_state)\n }\n }\n \n- if (cpu_state->hvf_x86->interruptable && (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&\n+ if (cpu_state->hvf_x86->interruptable &&\n+ (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&\n (EFLAGS(cpu_state) & IF_MASK) && !(info & VMCS_INTR_VALID)) {\n int line = cpu_get_pic_interrupt(&x86cpu->env);\n cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;\n- if (line >= 0)\n- wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line | VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);\n+ if (line >= 0) {\n+ wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |\n+ VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);\n+ }\n }\n- if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD)\n+ if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {\n vmx_set_int_window_exiting(cpu_state);\n+ }\n }\n \n int hvf_process_events(CPUState *cpu_state)\n {\n X86CPU *cpu = X86_CPU(cpu_state);\n CPUX86State *env = &cpu->env;\n- \n+\n EFLAGS(cpu_state) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);\n \n if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {\n@@ -435,7 +446,8 @@ int hvf_process_events(CPUState *cpu_state)\n cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;\n apic_poll_irq(cpu->apic_state);\n }\n- if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) && (EFLAGS(cpu_state) & IF_MASK)) ||\n+ if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&\n+ (EFLAGS(cpu_state) & IF_MASK)) ||\n (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {\n cpu_state->halted = 0;\n }\ndiff --git a/target/i386/hvf-utils/x86hvf.h b/target/i386/hvf-utils/x86hvf.h\nindex b4cb4c4d26..a81f7c41c7 100644\n--- a/target/i386/hvf-utils/x86hvf.h\n+++ b/target/i386/hvf-utils/x86hvf.h\n@@ -7,7 +7,8 @@ int hvf_process_events(CPUState *);\n int hvf_put_registers(CPUState *);\n int hvf_get_registers(CPUState *);\n void 
hvf_inject_interrupts(CPUState *);\n-void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg, SegmentCache *qseg, bool is_tr);\n+void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,\n+ SegmentCache *qseg, bool is_tr);\n void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg);\n void hvf_put_xsave(CPUState *cpu_state);\n void hvf_put_segments(CPUState *cpu_state);\n@@ -16,4 +17,6 @@ void hvf_get_xsave(CPUState *cpu_state);\n void hvf_get_msrs(CPUState *cpu_state);\n void vmx_clear_int_window_exiting(CPUState *cpu);\n void hvf_get_segments(CPUState *cpu_state);\n+void vmx_update_tpr(CPUState *cpu);\n+void hvf_cpu_synchronize_state(CPUState *cpu_state);\n #endif\n", "prefixes": [ "07/14" ] }