
[-V2,3/4] target-ppc: Fix page table lookup with kvm enabled

Message ID 1376995766-16526-4-git-send-email-aneesh.kumar@linux.vnet.ibm.com
State New

Commit Message

Aneesh Kumar K.V Aug. 20, 2013, 10:49 a.m. UTC
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>

With KVM enabled, the hash page table is stored in the hypervisor. Use the
KVM_PPC_GET_HTAB_FD ioctl to read the htab contents instead of loading them
from guest memory. Without this we get the error below when trying to read a
guest address:

 (gdb) x/10 do_fork
 0xc000000000098660 <do_fork>:   Cannot access memory at address 0xc000000000098660
 (gdb)

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 target-ppc/kvm.c        | 45 +++++++++++++++++++++++++++++++++++++++++++++
 target-ppc/kvm_ppc.h    |  9 ++++++++-
 target-ppc/mmu-hash64.c | 25 ++++++++++++++++---------
 3 files changed, 69 insertions(+), 10 deletions(-)
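
For reference, the kernel interface this relies on is the HTAB file descriptor
returned by the KVM_PPC_GET_HTAB_FD vm ioctl: each read on that descriptor
returns a kvm_get_htab_header followed by the valid HPTEs (two doublewords
each) starting at the requested index. A minimal sketch of reading a single
bolted entry outside QEMU could look like this (vm_fd is assumed to be an
already-created KVM VM descriptor; error handling is abbreviated):

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch: fetch one bolted HPTE through the KVM htab fd. */
    static int read_bolted_hpte(int vm_fd, uint64_t index, uint64_t hpte[2])
    {
        struct kvm_get_htab_fd ghf = {
            .flags = KVM_GET_HTAB_BOLTED_ONLY,
            .start_index = index,
        };
        struct {
            struct kvm_get_htab_header header;
            uint64_t hpte[3];            /* room for one extra doubleword */
        } buf;
        int fd, ret = -1;

        fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
        if (fd < 0) {
            return fd;
        }
        /* Expect exactly one valid entry at the index we asked for. */
        if (read(fd, &buf, sizeof(buf)) > 0 &&
            buf.header.n_valid == 1 && buf.header.index == index) {
            hpte[0] = buf.hpte[0];       /* doubleword 0: valid/AVPN bits */
            hpte[1] = buf.hpte[1];       /* doubleword 1: RPN/protection  */
            ret = 0;
        }
        close(fd);
        return ret;
    }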

Patch

diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index a629447..b590808 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -1885,3 +1885,48 @@  int kvm_arch_on_sigbus(int code, void *addr)
 void kvm_arch_init_irq_routing(KVMState *s)
 {
 }
+
+int kvmppc_hash64_load_hpte(PowerPCCPU *cpu, uint64_t index,
+                            target_ulong *hpte0, target_ulong *hpte1)
+{
+    int htab_fd;
+    struct kvm_get_htab_fd ghf;
+    struct kvm_get_htab_buf {
+        struct kvm_get_htab_header header;
+        /*
+         * Older kernels required one extra byte.
+         */
+        uint64_t hpte[3];
+    } hpte_buf;
+
+    *hpte0 = 0;
+    *hpte1 = 0;
+    if (!cap_htab_fd) {
+        return 0;
+    }
+    /*
+     * At this point we are only interested in reading bolted entries.
+     */
+    ghf.flags = KVM_GET_HTAB_BOLTED_ONLY;
+    ghf.start_index = index;
+    htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
+    if (htab_fd < 0) {
+        return htab_fd;
+    }
+
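+    /*
+     * Each record read from the fd is a kvm_get_htab_header followed by
+     * header.n_valid HPTEs of two doublewords each, starting at HPTE
+     * number header.index.
+     */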
+    if (read(htab_fd, &hpte_buf, sizeof(hpte_buf)) < 0) {
+        goto out;
+    }
+    /*
+     * We requested only one entry, so we should get exactly one
+     * valid entry at the same index.
+     */
+    if (hpte_buf.header.n_valid != 1 || hpte_buf.header.index != index) {
+        goto out;
+    }
+    *hpte0 = hpte_buf.hpte[0];
+    *hpte1 = hpte_buf.hpte[1];
+out:
+    close(htab_fd);
+    return 0;
+}
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index 4ae7bf2..e25373a 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -42,7 +42,8 @@  int kvmppc_get_htab_fd(bool write);
 int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns);
 int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                            uint16_t n_valid, uint16_t n_invalid);
-
+int kvmppc_hash64_load_hpte(PowerPCCPU *cpu, uint64_t index,
+                            target_ulong *hpte0, target_ulong *hpte1);
 #else
 
 static inline uint32_t kvmppc_get_tbfreq(void)
@@ -181,6 +182,12 @@  static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
     abort();
 }
 
+static inline int kvmppc_hash64_load_hpte(PowerPCCPU *cpu, uint64_t index,
+                                          target_ulong *hpte0,
+                                          target_ulong *hpte1)
+{
+    abort();
+}
 #endif
 
 #ifndef CONFIG_KVM
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 67fc1b5..4d8120c 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -302,17 +302,26 @@  static int ppc_hash64_amr_prot(CPUPPCState *env, ppc_hash_pte64_t pte)
     return prot;
 }
 
-static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr pteg_off,
+static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr hash,
                                      bool secondary, target_ulong ptem,
                                      ppc_hash_pte64_t *pte)
 {
-    hwaddr pte_offset = pteg_off;
+    uint64_t index;
+    hwaddr pte_offset;
     target_ulong pte0, pte1;
     int i;
 
+    /* Byte offset of the PTE group in the guest hash table (TCG path). */
+    pte_offset = (hash * HASH_PTEG_SIZE_64) & env->htab_mask;
+    /* HPTE number passed to the KVM htab fd (KVM path). */
+    index = (hash * HPTES_PER_GROUP) & env->htab_mask;
+
     for (i = 0; i < HPTES_PER_GROUP; i++) {
-        pte0 = ppc_hash64_load_hpte0(env, pte_offset);
-        pte1 = ppc_hash64_load_hpte1(env, pte_offset);
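+        /*
+         * With KVM the hash table lives in the host kernel, so fetch
+         * the entry through the htab fd; otherwise load it from the
+         * guest-visible hash table.
+         */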
+        if (kvm_enabled()) {
+            kvmppc_hash64_load_hpte(ppc_env_get_cpu(env), index + i,
+                                    &pte0, &pte1);
+        } else {
+            pte0 = ppc_hash64_load_hpte0(env, pte_offset);
+            pte1 = ppc_hash64_load_hpte1(env, pte_offset);
+        }
 
         if ((pte0 & HPTE64_V_VALID)
             && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
@@ -332,7 +341,7 @@  static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
                                      ppc_slb_t *slb, target_ulong eaddr,
                                      ppc_hash_pte64_t *pte)
 {
-    hwaddr pteg_off, pte_offset;
+    hwaddr pte_offset;
     hwaddr hash;
     uint64_t vsid, epnshift, epnmask, epn, ptem;
 
@@ -367,8 +376,7 @@  static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
             " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
             " hash=" TARGET_FMT_plx "\n",
             env->htab_base, env->htab_mask, vsid, ptem,  hash);
-    pteg_off = (hash * HASH_PTEG_SIZE_64) & env->htab_mask;
-    pte_offset = ppc_hash64_pteg_search(env, pteg_off, 0, ptem, pte);
+    pte_offset = ppc_hash64_pteg_search(env, hash, 0, ptem, pte);
 
     if (pte_offset == -1) {
         /* Secondary PTEG lookup */
@@ -377,8 +385,7 @@  static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
                 " hash=" TARGET_FMT_plx "\n", env->htab_base,
                 env->htab_mask, vsid, ptem, ~hash);
 
-        pteg_off = (~hash * HASH_PTEG_SIZE_64) & env->htab_mask;
-        pte_offset = ppc_hash64_pteg_search(env, pteg_off, 1, ptem, pte);
+        pte_offset = ppc_hash64_pteg_search(env, ~hash, 1, ptem, pte);
     }
 
     return pte_offset;