Patchwork [10/40] xenner: kernel: Hypercall handler (i386)

login
register
mail settings
Submitter Alexander Graf
Date Nov. 1, 2010, 3:01 p.m.
Message ID <1288623713-28062-11-git-send-email-agraf@suse.de>
Download mbox | patch
Permalink /patch/69781/
State New
Headers show

Comments

Alexander Graf - Nov. 1, 2010, 3:01 p.m.
Xenner handles guest hypercalls itself. This patch adds all the handling
code that is i386 specific.

Signed-off-by: Alexander Graf <agraf@suse.de>
---
 pc-bios/xenner/xenner-hcall32.c |  299 +++++++++++++++++++++++++++++++++++++++
 1 files changed, 299 insertions(+), 0 deletions(-)
 create mode 100644 pc-bios/xenner/xenner-hcall32.c

Patch

diff --git a/pc-bios/xenner/xenner-hcall32.c b/pc-bios/xenner/xenner-hcall32.c
new file mode 100644
index 0000000..45a3046
--- /dev/null
+++ b/pc-bios/xenner/xenner-hcall32.c
@@ -0,0 +1,299 @@ 
+/*
+ *  Copyright (C) Red Hat 2007
+ *  Copyright (C) Novell Inc. 2010
+ *
+ *  Author(s): Gerd Hoffmann <kraxel@redhat.com>
+ *             Alexander Graf <agraf@suse.de>
+ *
+ *  Xenner 32 bit hypercall handlers
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; under version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <errno.h>
+
+#include "xenner.h"
+
+/* --------------------------------------------------------------------- */
+
+typedef int32_t (*xen_hcall)(struct xen_cpu *cpu, uint32_t *args);
+static int32_t multicall(struct xen_cpu *cpu, uint32_t *args);
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * HYPERVISOR_update_va_mapping handler.
+ *
+ * args[0] = guest virtual address
+ * args[1] = new pte value, low 32 bits
+ * args[2] = new pte value, high 32 bits
+ * args[3] = UVMF_* flush flags
+ *
+ * Writes the new pte first through the linear page table (lpt); if that
+ * write faults, falls back to mapping the page table page explicitly.
+ * Returns 0 on success, -EINVAL on failure.
+ */
+static int32_t update_va_mapping(struct xen_cpu *cpu, uint32_t *args)
+{
+    uint32_t va    = args[0];
+    uint64_t val   = args[1] | ((uint64_t)args[2] << 32);
+    uint32_t flags = args[3];
+    void *map = NULL;
+    pte_t *ptr = find_pte_lpt(va);
+    pte_t pte;
+
+    printk(5, "%s: va %" PRIx32 " val %" PRIx64 " pte %p\n",
+           __FUNCTION__, va, val, ptr);
+
+    pgtable_walk(cpu, (uint32_t)ptr);
+
+    /* Guests may not remap the hypervisor-owned m2p region. */
+    if (va >= XEN_M2P) {
+        goto inval;
+    }
+
+    /* Read the current pte via the linear page table. */
+    if (copypt_pf(&pte, ptr) < 0) {
+        printk(2, "%s: lpt rd fault: va %" PRIx32 " val %" PRIx64 "\n",
+               __FUNCTION__, va, val);
+        goto inval;
+    }
+
+    /*
+     * New value only differs from the current pte by a cleared RW bit
+     * on a kernel mapping: nothing to write, just handle the flush.
+     */
+    if ((pte & ~_PAGE_RW) == val && !(val & _PAGE_USER)) {
+        /* ignore make_readonly */
+        vminfo.faults[XEN_FAULT_UPDATE_VA_FIX_RO]++;
+        goto doflags;
+    }
+
+    /* Fast path: write the new pte through the linear page table. */
+    pte = val;
+    if (!copypt_pf(ptr, &pte)) {
+        goto doflags;
+    }
+
+    printk(1, "%s: lpt wr fault: va %" PRIx32 " val %" PRIx64 "\n",
+           __FUNCTION__, va, val);
+
+    /* Slow path: map the page table page and retry the write there. */
+    ptr = find_pte_map(cpu, va);
+    if (!ptr) {
+        goto inval;
+    }
+
+    /* remember the mapping so it can be released on both exit paths */
+    map = ptr;
+    if (!copypt_pf(ptr, &pte)) {
+        goto doflags;
+    }
+
+    printk(1, "%s: map wr fault: va %" PRIx32 " val %" PRIx64 "\n",
+           __FUNCTION__, va, val);
+    goto inval;
+
+doflags:
+    /* success: release the temporary mapping, then honor the flush flags */
+    if (map) {
+        free_page(map);
+    }
+    switch (flags & UVMF_FLUSHTYPE_MASK) {
+    case UVMF_NONE:
+        break;
+    case UVMF_TLB_FLUSH:
+        flush_tlb();
+        break;
+    case UVMF_INVLPG:
+        flush_tlb_addr(va);
+        break;
+    }
+    return 0;
+
+inval:
+    /* failure: release the temporary mapping, no flush */
+    if (map) {
+        free_page(map);
+    }
+    return -EINVAL;
+}
+
+/*
+ * HYPERVISOR_mmu_update handler.
+ *
+ * args[0] = pointer to an array of request pairs (ptr, val), 64 bits each
+ * args[1] = number of requests
+ * args[2] = optional pointer receiving the number of requests processed
+ * args[3] = target domain; only DOMID_SELF is supported
+ *
+ * The low two bits of each request's ptr field select the operation
+ * (MMU_NORMAL_PT_UPDATE or MMU_MACHPHYS_UPDATE).  Returns 0 on success,
+ * -ENOSYS for foreign domains or unknown operations.
+ */
+static int32_t mmu_update(struct xen_cpu *cpu, uint32_t *args)
+{
+    uint64_t *reqs = (void*)args[0];
+    uint32_t count = args[1];
+    uint32_t *done = (void*)args[2];
+    uint32_t dom   = args[3];
+    uint32_t i;
+
+    if (dom != DOMID_SELF) {
+        printk(1, "%s: foreigndom not supported (domid %d)\n",
+               __FUNCTION__, dom);
+        return -ENOSYS;
+    }
+
+    for (i = 0; i < count; i++) {
+        switch (reqs[0] & 3) {
+        case MMU_NORMAL_PT_UPDATE:
+        {
+            /* write the new pte via a temporary mapping of the pt page */
+            pte_t *pte = map_page(reqs[0]);
+            *pte = reqs[1];
+            free_page(pte);
+#ifdef CONFIG_PAE
+            /* updating the live page directory: resync emu mappings */
+            if (read_cr3_mfn(cpu) == addr_to_frame(reqs[0])) {
+                update_emu_mappings(read_cr3_mfn(cpu));
+                flush_tlb();
+            }
+#endif
+            break;
+        }
+        case MMU_MACHPHYS_UPDATE:
+        {
+            xen_pfn_t gmfn = reqs[0] >> PAGE_SHIFT;
+            xen_pfn_t gpfn = reqs[1];
+            /* mfns below the guest range belong to xenner itself */
+            if (gmfn < vmconf.mfn_guest) {
+                panic("suspicious m2p update", NULL);
+            }
+            m2p[gmfn] = gpfn;
+            break;
+        }
+        default:
+            return -ENOSYS;
+        }
+        reqs += 2;
+    }
+    if (done) {
+        *done = i;
+    }
+
+    return 0;
+}
+
+/*
+ * HYPERVISOR_iret handler: return from an event/exception to the guest.
+ *
+ * Pops the frame (eax, eip, cs, eflags[, esp, ss]) the guest pushed on
+ * its stack and rewrites the saved register state at the top of the
+ * xenner stack so the real iret restores it.  Returns -EINTR so
+ * do_hypercall() skips writing a return value into eax.
+ */
+static int32_t iret(struct xen_cpu *cpu, uint32_t *args)
+{
+    /* saved guest register frame sits at the top of our stack */
+    struct regs_32 *regs = (void*)cpu->stack_high - sizeof(*regs);
+    uint32_t eflags;
+    uint32_t *stack;
+
+    stack = (void*)regs->esp;
+
+    regs->eax     = stack[0];
+    regs->eip     = stack[1];
+    regs->cs      = stack[2];
+    eflags        = stack[3];
+    /* never hand IOPL to the guest; interrupts stay enabled for real */
+    regs->eflags  = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
+
+    if (context_is_emu(regs)) {
+        /* not allowed */
+        panic("guest tried iret to ring0", regs);
+
+    } else if (context_is_kernel(cpu, regs)) {
+        /* kernel -> kernel  --  just move stack pointer */
+        regs->esp += 4*4;
+
+    } else {
+        /* kernel -> user  --  switch back stack */
+        regs->esp = stack[4];
+        regs->ss  = stack[5];
+    }
+
+    /* Restore upcall mask from supplied EFLAGS.IF. */
+    if (eflags & X86_EFLAGS_IF) {
+        guest_sti(cpu);
+    } else {
+        guest_cli(cpu);
+    }
+
+    return -EINTR;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Hypercall dispatch table, indexed by hypercall number.  Entries left
+ * NULL are reported as -ENOSYS by do_hypercall().
+ *
+ * Note: the original table listed __HYPERVISOR_set_debugreg twice;
+ * the later "= error_noop" designated initializer silently overrode
+ * the real set_debugreg handler (gcc warns with -Woverride-init).
+ * The duplicate has been dropped so the implemented handler is used.
+ */
+static xen_hcall hcalls[XEN_HCALL_MAX] = {
+    [ __HYPERVISOR_update_va_mapping ]       = update_va_mapping,
+    [ __HYPERVISOR_mmu_update ]              = mmu_update,
+    [ __HYPERVISOR_mmuext_op ]               = mmuext_op,
+    [ __HYPERVISOR_update_descriptor ]       = update_descriptor,
+    [ __HYPERVISOR_stack_switch ]            = stack_switch,
+    [ __HYPERVISOR_multicall ]               = multicall,
+    [ __HYPERVISOR_iret ]                    = iret,
+    [ __HYPERVISOR_fpu_taskswitch ]          = fpu_taskswitch,
+    [ __HYPERVISOR_grant_table_op ]          = grant_table_op,
+    [ __HYPERVISOR_xen_version ]             = xen_version,
+    [ __HYPERVISOR_vm_assist ]               = vm_assist,
+    [ __HYPERVISOR_sched_op ]                = sched_op,
+    [ __HYPERVISOR_sched_op_compat ]         = sched_op_compat,
+    [ __HYPERVISOR_memory_op ]               = memory_op,
+    [ __HYPERVISOR_set_trap_table ]          = set_trap_table,
+    [ __HYPERVISOR_set_callbacks ]           = set_callbacks,
+    [ __HYPERVISOR_callback_op ]             = callback_op,
+    [ __HYPERVISOR_set_gdt ]                 = set_gdt,
+    [ __HYPERVISOR_vcpu_op ]                 = vcpu_op,
+    [ __HYPERVISOR_event_channel_op ]        = event_channel_op,
+    [ __HYPERVISOR_event_channel_op_compat ] = event_channel_op_compat,
+    [ __HYPERVISOR_set_timer_op ]            = set_timer_op,
+    [ __HYPERVISOR_physdev_op ]              = physdev_op,
+    [ __HYPERVISOR_get_debugreg ]            = get_debugreg,
+    [ __HYPERVISOR_set_debugreg ]            = set_debugreg,
+    [ __HYPERVISOR_console_io ]              = console_io,
+
+    /* unsupported calls answered with a fixed error */
+    [ __HYPERVISOR_physdev_op_compat ]       = error_noperm,
+    [ __HYPERVISOR_platform_op ]             = error_noperm,
+};
+
+/*
+ * HYPERVISOR_multicall handler.
+ *
+ * args[0] = pointer to an array of struct multicall_entry
+ * args[1] = number of entries
+ *
+ * Dispatches each entry through the hcalls[] table and stores the
+ * per-call return value in entry->result.  Always returns 0; an
+ * unknown hypercall number is fatal, matching do_hypercall()'s
+ * treatment of unhandled calls.
+ */
+static int32_t multicall(struct xen_cpu *cpu, uint32_t *args)
+{
+    struct multicall_entry *calls = (void*)args[0];
+    uint32_t i, count = args[1];
+    uint32_t margs[6];
+
+    for (i = 0; i < count; i++) {
+        /*
+         * op is guest-controlled: bounds-check it before using it to
+         * index hcalls[] and vminfo.hcalls[] (the original code only
+         * checked for a NULL handler, so op >= XEN_HCALL_MAX read out
+         * of bounds).
+         */
+        if (calls[i].op >= XEN_HCALL_MAX || !hcalls[calls[i].op]) {
+            panic("unknown hypercall in multicall list", NULL);
+        }
+        vminfo.hcalls[calls[i].op]++;
+        margs[0] = calls[i].args[0];
+        margs[1] = calls[i].args[1];
+        margs[2] = calls[i].args[2];
+        margs[3] = calls[i].args[3];
+        margs[4] = calls[i].args[4];
+        margs[5] = calls[i].args[5];
+        calls[i].result = hcalls[calls[i].op](cpu, margs);
+    }
+    return 0;
+}
+
+/*
+ * Hypercall entry point, called from the int/syscall trap path with the
+ * saved guest registers.  The hypercall number is in eax, arguments in
+ * ebx/ecx/edx/esi/edi/ebp.  The handler's return value is written back
+ * to eax unless the handler returned -EINTR (iret), which rewrites the
+ * whole register frame itself.
+ */
+asmlinkage void do_hypercall(struct regs_32 *regs)
+{
+    /*
+     * Zero-initialize: the "handled" path below prints args[0]/args[1]
+     * even when we bail out before copying the registers in (invalid
+     * hypercall number / no handler), which previously read
+     * uninitialized memory.
+     */
+    uint32_t args[6] = { 0 };
+    uint32_t retval = -ENOSYS;
+    struct xen_cpu *cpu = get_cpu();
+
+    printk(3, "%s: %s #%d\n", __FUNCTION__,
+           __hypervisor_name(regs->eax), regs->eax);
+
+    if (regs->eax >= XEN_HCALL_MAX) {
+        /* invalid hypercall number */
+        goto handled;
+    }
+    if (!hcalls[regs->eax]) {
+        /* no hypercall handler */
+        goto handled;
+    }
+
+    /* do call */
+    vminfo.hcalls[regs->eax]++;
+    args[0] = regs->ebx;
+    args[1] = regs->ecx;
+    args[2] = regs->edx;
+    args[3] = regs->esi;
+    args[4] = regs->edi;
+    args[5] = regs->ebp;
+    retval = hcalls[regs->eax](cpu, args);
+
+    /* iret rewrote the register frame itself; don't clobber eax */
+    if (-EINTR == retval) {
+        goto iret;
+    }
+
+handled:
+    if (-ENOSYS == retval) {
+        printk(0, "hypercall %s (#%d)  |  arg0 0x%x  arg1 0x%x  -> -ENOSYS\n",
+               __hypervisor_name(regs->eax), regs->eax, args[0], args[1]);
+    }
+    regs->eax = retval;
+    regs->error = HCALL_HANDLED;
+    evtchn_try_forward(cpu, regs);
+    return;
+
+iret:
+    regs->error = HCALL_HANDLED;
+    evtchn_try_forward(cpu, regs);
+    return;
+}