| Message ID | 1457317586-15122-4-git-send-email-david@gibson.dropbear.id.au |
|---|---|
| State | New |
| Headers | show |
On Mon, 7 Mar 2016 13:26:26 +1100 David Gibson <david@gibson.dropbear.id.au> wrote: > fa48b43 "target-ppc: Remove hack for ppc_hash64_load_hpte*() with HV KVM" > purports to remove a hack in the handling of hash page tables (HPTs) > managed by KVM instead of qemu. However, it actually went in the wrong > direction. > > That patch requires anything looking for an external HPT (that is one not > managed by the guest itself) to check both env->external_htab (for a qemu > managed HPT) and kvmppc_kern_htab (for a KVM managed HPT). That's a > problem because kvmppc_kern_htab is local to mmu-hash64.c, but some places > which need to check for an external HPT are outside that, such as > kvm_arch_get_registers(). The latter was subtly broken by the earlier > patch such that gdbstub can no longer access memory. > > Basically a KVM managed HPT is much more like a qemu managed HPT than it is > like a guest managed HPT, so the original "hack" was actually on the right > track. > > This partially reverts fa48b43, so we again mark a KVM managed external HPT > by putting a special but non-NULL value in env->external_htab. It then > goes further, using that marker to eliminate the kvmppc_kern_htab global > entirely. The ppc_hash64_set_external_hpt() helper function is extended > to set that marker if passed a NULL value (if you're setting an external > HPT, but don't have an actual HPT to set, the assumption is that it must > be a KVM managed HPT). > > This also has some flow-on changes to the HPT access helpers, required by > the above changes. > > Reported-by: Greg Kurz <gkurz@linux.vnet.ibm.com> > Signed-off-by: David Gibson <david@gibson.dropbear.id.au> > Reviewed-by: Thomas Huth <thuth@redhat.com> > --- Typo in comment in target-ppc/mmu-hash64.c (see below). Apart from that, you get: Reviewed-by: Greg Kurz <gkurz@linux.vnet.ibm.com> and also... 
without this patch: 0x00000000100009b8 in main (argc=<error reading variable: Cannot access memory at address 0x3fffc03ce270>, argv=<error reading variable: Cannot access memory at address 0x3fffc03ce278>) at fp.c:11 with this patch: 0x00000000100009b8 in main (argc=4, argv=0x3fffc7fcfd18) at fp.c:11 Just to be sure, I've also tested with TCG: no regression. Thanks for the fix ! Tested-by: Greg Kurz <gkurz@linux.vnet.ibm.com> > hw/ppc/spapr.c | 3 +-- > hw/ppc/spapr_hcall.c | 10 +++++----- > target-ppc/mmu-hash64.c | 40 ++++++++++++++++++---------------------- > target-ppc/mmu-hash64.h | 9 +++------ > 4 files changed, 27 insertions(+), 35 deletions(-) > > diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c > index 72a018b..43708a2 100644 > --- a/hw/ppc/spapr.c > +++ b/hw/ppc/spapr.c > @@ -1091,7 +1091,7 @@ static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift, > } > > spapr->htab_shift = shift; > - kvmppc_kern_htab = true; > + spapr->htab = NULL; > } else { > /* kernel-side HPT not needed, allocate in userspace instead */ > size_t size = 1ULL << shift; > @@ -1106,7 +1106,6 @@ static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift, > > memset(spapr->htab, 0, size); > spapr->htab_shift = shift; > - kvmppc_kern_htab = false; > > for (i = 0; i < size / HASH_PTE_SIZE_64; i++) { > DIRTY_HPTE(HPTE(spapr->htab, i)); > diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c > index 1733482..b2b1b93 100644 > --- a/hw/ppc/spapr_hcall.c > +++ b/hw/ppc/spapr_hcall.c > @@ -122,17 +122,17 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr, > break; > } > } > - ppc_hash64_stop_access(token); > + ppc_hash64_stop_access(cpu, token); > if (index == 8) { > return H_PTEG_FULL; > } > } else { > token = ppc_hash64_start_access(cpu, pte_index); > if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) { > - ppc_hash64_stop_access(token); > + ppc_hash64_stop_access(cpu, token); > return H_PTEG_FULL; > } > - ppc_hash64_stop_access(token); > + 
ppc_hash64_stop_access(cpu, token); > } > > ppc_hash64_store_hpte(cpu, pte_index + index, > @@ -165,7 +165,7 @@ static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex, > token = ppc_hash64_start_access(cpu, ptex); > v = ppc_hash64_load_hpte0(cpu, token, 0); > r = ppc_hash64_load_hpte1(cpu, token, 0); > - ppc_hash64_stop_access(token); > + ppc_hash64_stop_access(cpu, token); > > if ((v & HPTE64_V_VALID) == 0 || > ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) || > @@ -288,7 +288,7 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr, > token = ppc_hash64_start_access(cpu, pte_index); > v = ppc_hash64_load_hpte0(cpu, token, 0); > r = ppc_hash64_load_hpte1(cpu, token, 0); > - ppc_hash64_stop_access(token); > + ppc_hash64_stop_access(cpu, token); > > if ((v & HPTE64_V_VALID) == 0 || > ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) { > diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c > index 7b5200b..a9ae0b3 100644 > --- a/target-ppc/mmu-hash64.c > +++ b/target-ppc/mmu-hash64.c > @@ -36,10 +36,11 @@ > #endif > > /* > - * Used to indicate whether we have allocated htab in the > - * host kernel > + * Used to indicate that a CPU has it's hash page table (HPT) managed s/it's/its > + * within the host kernel > */ > -bool kvmppc_kern_htab; > +#define MMU_HASH64_KVM_MANAGED_HPT ((void *)-1) > + > /* > * SLB handling > */ > @@ -283,7 +284,11 @@ void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift, > > cpu_synchronize_state(CPU(cpu)); > > - env->external_htab = hpt; > + if (hpt) { > + env->external_htab = hpt; > + } else { > + env->external_htab = MMU_HASH64_KVM_MANAGED_HPT; > + } > ppc_hash64_set_sdr1(cpu, (target_ulong)(uintptr_t)hpt | (shift - 18), > &local_err); > if (local_err) { > @@ -396,25 +401,16 @@ uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index) > hwaddr pte_offset; > > pte_offset = pte_index * HASH_PTE_SIZE_64; > - if (kvmppc_kern_htab) { > + if (cpu->env.external_htab == 
MMU_HASH64_KVM_MANAGED_HPT) { > /* > * HTAB is controlled by KVM. Fetch the PTEG into a new buffer. > */ > token = kvmppc_hash64_read_pteg(cpu, pte_index); > - if (token) { > - return token; > - } > + } else if (cpu->env.external_htab) { > /* > - * pteg read failed, even though we have allocated htab via > - * kvmppc_reset_htab. > + * HTAB is controlled by QEMU. Just point to the internally > + * accessible PTEG. > */ > - return 0; > - } > - /* > - * HTAB is controlled by QEMU. Just point to the internally > - * accessible PTEG. > - */ > - if (cpu->env.external_htab) { > token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset; > } else if (cpu->env.htab_base) { > token = cpu->env.htab_base + pte_offset; > @@ -422,9 +418,9 @@ uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index) > return token; > } > > -void ppc_hash64_stop_access(uint64_t token) > +void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token) > { > - if (kvmppc_kern_htab) { > + if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) { > kvmppc_hash64_free_pteg(token); > } > } > @@ -453,11 +449,11 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash, > && HPTE64_V_COMPARE(pte0, ptem)) { > pte->pte0 = pte0; > pte->pte1 = pte1; > - ppc_hash64_stop_access(token); > + ppc_hash64_stop_access(cpu, token); > return (pte_index + i) * HASH_PTE_SIZE_64; > } > } > - ppc_hash64_stop_access(token); > + ppc_hash64_stop_access(cpu, token); > /* > * We didn't find a valid entry. 
> */ > @@ -772,7 +768,7 @@ void ppc_hash64_store_hpte(PowerPCCPU *cpu, > { > CPUPPCState *env = &cpu->env; > > - if (kvmppc_kern_htab) { > + if (env->external_htab == MMU_HASH64_KVM_MANAGED_HPT) { > kvmppc_hash64_write_pte(env, pte_index, pte0, pte1); > return; > } > diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h > index 138cd7e..9bf8b9b 100644 > --- a/target-ppc/mmu-hash64.h > +++ b/target-ppc/mmu-hash64.h > @@ -90,16 +90,13 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu, > #define HPTE64_V_1TB_SEG 0x4000000000000000ULL > #define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL > > - > -extern bool kvmppc_kern_htab; > - > void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value, > Error **errp); > void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift, > Error **errp); > > uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index); > -void ppc_hash64_stop_access(uint64_t token); > +void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token); > > static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu, > uint64_t token, int index) > @@ -108,7 +105,7 @@ static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu, > uint64_t addr; > > addr = token + (index * HASH_PTE_SIZE_64); > - if (kvmppc_kern_htab || env->external_htab) { > + if (env->external_htab) { > return ldq_p((const void *)(uintptr_t)addr); > } else { > return ldq_phys(CPU(cpu)->as, addr); > @@ -122,7 +119,7 @@ static inline target_ulong ppc_hash64_load_hpte1(PowerPCCPU *cpu, > uint64_t addr; > > addr = token + (index * HASH_PTE_SIZE_64) + HASH_PTE_SIZE_64/2; > - if (kvmppc_kern_htab || env->external_htab) { > + if (env->external_htab) { > return ldq_p((const void *)(uintptr_t)addr); > } else { > return ldq_phys(CPU(cpu)->as, addr);
On Mon, Mar 07, 2016 at 02:41:34PM +0100, Greg Kurz wrote: > On Mon, 7 Mar 2016 13:26:26 +1100 > David Gibson <david@gibson.dropbear.id.au> wrote: > > > fa48b43 "target-ppc: Remove hack for ppc_hash64_load_hpte*() with HV KVM" > > purports to remove a hack in the handling of hash page tables (HPTs) > > managed by KVM instead of qemu. However, it actually went in the wrong > > direction. > > > > That patch requires anything looking for an external HPT (that is one not > > managed by the guest itself) to check both env->external_htab (for a qemu > > managed HPT) and kvmppc_kern_htab (for a KVM managed HPT). That's a > > problem because kvmppc_kern_htab is local to mmu-hash64.c, but some places > > which need to check for an external HPT are outside that, such as > > kvm_arch_get_registers(). The latter was subtly broken by the earlier > > patch such that gdbstub can no longer access memory. > > > > Basically a KVM managed HPT is much more like a qemu managed HPT than it is > > like a guest managed HPT, so the original "hack" was actually on the right > > track. > > > > This partially reverts fa48b43, so we again mark a KVM managed external HPT > > by putting a special but non-NULL value in env->external_htab. It then > > goes further, using that marker to eliminate the kvmppc_kern_htab global > > entirely. The ppc_hash64_set_external_hpt() helper function is extended > > to set that marker if passed a NULL value (if you're setting an external > > HPT, but don't have an actual HPT to set, the assumption is that it must > > be a KVM managed HPT). > > > > This also has some flow-on changes to the HPT access helpers, required by > > the above changes. > > > > Reported-by: Greg Kurz <gkurz@linux.vnet.ibm.com> > > Signed-off-by: David Gibson <david@gibson.dropbear.id.au> > > Reviewed-by: Thomas Huth <thuth@redhat.com> > > --- > > Typo in comment in target-ppc/mmu-hash64.c (see below). 
> > Apart from that, you get: > > Reviewed-by: Greg Kurz <gkurz@linux.vnet.ibm.com> > > and also... > > without this patch: > > 0x00000000100009b8 in main (argc=<error reading variable: Cannot access memory at address 0x3fffc03ce270>, argv=<error reading variable: Cannot access memory at address 0x3fffc03ce278>) at fp.c:11 > > with this patch: > > 0x00000000100009b8 in main (argc=4, argv=0x3fffc7fcfd18) at fp.c:11 > > Just to be sure, I've also tested with TCG: no regression. > > Thanks for the fix ! > > Tested-by: Greg Kurz <gkurz@linux.vnet.ibm.com> Thanks. > > hw/ppc/spapr.c | 3 +-- > > hw/ppc/spapr_hcall.c | 10 +++++----- > > target-ppc/mmu-hash64.c | 40 ++++++++++++++++++---------------------- > > target-ppc/mmu-hash64.h | 9 +++------ > > 4 files changed, 27 insertions(+), 35 deletions(-) > > > > diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c > > index 72a018b..43708a2 100644 > > --- a/hw/ppc/spapr.c > > +++ b/hw/ppc/spapr.c > > @@ -1091,7 +1091,7 @@ static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift, > > } > > > > spapr->htab_shift = shift; > > - kvmppc_kern_htab = true; > > + spapr->htab = NULL; > > } else { > > /* kernel-side HPT not needed, allocate in userspace instead */ > > size_t size = 1ULL << shift; > > @@ -1106,7 +1106,6 @@ static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift, > > > > memset(spapr->htab, 0, size); > > spapr->htab_shift = shift; > > - kvmppc_kern_htab = false; > > > > for (i = 0; i < size / HASH_PTE_SIZE_64; i++) { > > DIRTY_HPTE(HPTE(spapr->htab, i)); > > diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c > > index 1733482..b2b1b93 100644 > > --- a/hw/ppc/spapr_hcall.c > > +++ b/hw/ppc/spapr_hcall.c > > @@ -122,17 +122,17 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr, > > break; > > } > > } > > - ppc_hash64_stop_access(token); > > + ppc_hash64_stop_access(cpu, token); > > if (index == 8) { > > return H_PTEG_FULL; > > } > > } else { > > token = 
ppc_hash64_start_access(cpu, pte_index); > > if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) { > > - ppc_hash64_stop_access(token); > > + ppc_hash64_stop_access(cpu, token); > > return H_PTEG_FULL; > > } > > - ppc_hash64_stop_access(token); > > + ppc_hash64_stop_access(cpu, token); > > } > > > > ppc_hash64_store_hpte(cpu, pte_index + index, > > @@ -165,7 +165,7 @@ static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex, > > token = ppc_hash64_start_access(cpu, ptex); > > v = ppc_hash64_load_hpte0(cpu, token, 0); > > r = ppc_hash64_load_hpte1(cpu, token, 0); > > - ppc_hash64_stop_access(token); > > + ppc_hash64_stop_access(cpu, token); > > > > if ((v & HPTE64_V_VALID) == 0 || > > ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) || > > @@ -288,7 +288,7 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr, > > token = ppc_hash64_start_access(cpu, pte_index); > > v = ppc_hash64_load_hpte0(cpu, token, 0); > > r = ppc_hash64_load_hpte1(cpu, token, 0); > > - ppc_hash64_stop_access(token); > > + ppc_hash64_stop_access(cpu, token); > > > > if ((v & HPTE64_V_VALID) == 0 || > > ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) { > > diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c > > index 7b5200b..a9ae0b3 100644 > > --- a/target-ppc/mmu-hash64.c > > +++ b/target-ppc/mmu-hash64.c > > @@ -36,10 +36,11 @@ > > #endif > > > > /* > > - * Used to indicate whether we have allocated htab in the > > - * host kernel > > + * Used to indicate that a CPU has it's hash page table (HPT) managed > > s/it's/its Oh dear. That mispelling really annoys me, so it's an embarrassing error. Thanks for the catch.
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 72a018b..43708a2 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1091,7 +1091,7 @@ static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift, } spapr->htab_shift = shift; - kvmppc_kern_htab = true; + spapr->htab = NULL; } else { /* kernel-side HPT not needed, allocate in userspace instead */ size_t size = 1ULL << shift; @@ -1106,7 +1106,6 @@ static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift, memset(spapr->htab, 0, size); spapr->htab_shift = shift; - kvmppc_kern_htab = false; for (i = 0; i < size / HASH_PTE_SIZE_64; i++) { DIRTY_HPTE(HPTE(spapr->htab, i)); diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 1733482..b2b1b93 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -122,17 +122,17 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr, break; } } - ppc_hash64_stop_access(token); + ppc_hash64_stop_access(cpu, token); if (index == 8) { return H_PTEG_FULL; } } else { token = ppc_hash64_start_access(cpu, pte_index); if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) { - ppc_hash64_stop_access(token); + ppc_hash64_stop_access(cpu, token); return H_PTEG_FULL; } - ppc_hash64_stop_access(token); + ppc_hash64_stop_access(cpu, token); } ppc_hash64_store_hpte(cpu, pte_index + index, @@ -165,7 +165,7 @@ static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex, token = ppc_hash64_start_access(cpu, ptex); v = ppc_hash64_load_hpte0(cpu, token, 0); r = ppc_hash64_load_hpte1(cpu, token, 0); - ppc_hash64_stop_access(token); + ppc_hash64_stop_access(cpu, token); if ((v & HPTE64_V_VALID) == 0 || ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) || @@ -288,7 +288,7 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr, token = ppc_hash64_start_access(cpu, pte_index); v = ppc_hash64_load_hpte0(cpu, token, 0); r = ppc_hash64_load_hpte1(cpu, token, 0); - ppc_hash64_stop_access(token); + ppc_hash64_stop_access(cpu, token); 
if ((v & HPTE64_V_VALID) == 0 || ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) { diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c index 7b5200b..a9ae0b3 100644 --- a/target-ppc/mmu-hash64.c +++ b/target-ppc/mmu-hash64.c @@ -36,10 +36,11 @@ #endif /* - * Used to indicate whether we have allocated htab in the - * host kernel + * Used to indicate that a CPU has it's hash page table (HPT) managed + * within the host kernel */ -bool kvmppc_kern_htab; +#define MMU_HASH64_KVM_MANAGED_HPT ((void *)-1) + /* * SLB handling */ @@ -283,7 +284,11 @@ void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift, cpu_synchronize_state(CPU(cpu)); - env->external_htab = hpt; + if (hpt) { + env->external_htab = hpt; + } else { + env->external_htab = MMU_HASH64_KVM_MANAGED_HPT; + } ppc_hash64_set_sdr1(cpu, (target_ulong)(uintptr_t)hpt | (shift - 18), &local_err); if (local_err) { @@ -396,25 +401,16 @@ uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index) hwaddr pte_offset; pte_offset = pte_index * HASH_PTE_SIZE_64; - if (kvmppc_kern_htab) { + if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) { /* * HTAB is controlled by KVM. Fetch the PTEG into a new buffer. */ token = kvmppc_hash64_read_pteg(cpu, pte_index); - if (token) { - return token; - } + } else if (cpu->env.external_htab) { /* - * pteg read failed, even though we have allocated htab via - * kvmppc_reset_htab. + * HTAB is controlled by QEMU. Just point to the internally + * accessible PTEG. */ - return 0; - } - /* - * HTAB is controlled by QEMU. Just point to the internally - * accessible PTEG. 
- */ - if (cpu->env.external_htab) { token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset; } else if (cpu->env.htab_base) { token = cpu->env.htab_base + pte_offset; @@ -422,9 +418,9 @@ uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index) return token; } -void ppc_hash64_stop_access(uint64_t token) +void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token) { - if (kvmppc_kern_htab) { + if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) { kvmppc_hash64_free_pteg(token); } } @@ -453,11 +449,11 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash, && HPTE64_V_COMPARE(pte0, ptem)) { pte->pte0 = pte0; pte->pte1 = pte1; - ppc_hash64_stop_access(token); + ppc_hash64_stop_access(cpu, token); return (pte_index + i) * HASH_PTE_SIZE_64; } } - ppc_hash64_stop_access(token); + ppc_hash64_stop_access(cpu, token); /* * We didn't find a valid entry. */ @@ -772,7 +768,7 @@ void ppc_hash64_store_hpte(PowerPCCPU *cpu, { CPUPPCState *env = &cpu->env; - if (kvmppc_kern_htab) { + if (env->external_htab == MMU_HASH64_KVM_MANAGED_HPT) { kvmppc_hash64_write_pte(env, pte_index, pte0, pte1); return; } diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h index 138cd7e..9bf8b9b 100644 --- a/target-ppc/mmu-hash64.h +++ b/target-ppc/mmu-hash64.h @@ -90,16 +90,13 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu, #define HPTE64_V_1TB_SEG 0x4000000000000000ULL #define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL - -extern bool kvmppc_kern_htab; - void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value, Error **errp); void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift, Error **errp); uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index); -void ppc_hash64_stop_access(uint64_t token); +void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token); static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu, uint64_t token, int index) @@ -108,7 +105,7 @@ static inline 
target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu, uint64_t addr; addr = token + (index * HASH_PTE_SIZE_64); - if (kvmppc_kern_htab || env->external_htab) { + if (env->external_htab) { return ldq_p((const void *)(uintptr_t)addr); } else { return ldq_phys(CPU(cpu)->as, addr); @@ -122,7 +119,7 @@ static inline target_ulong ppc_hash64_load_hpte1(PowerPCCPU *cpu, uint64_t addr; addr = token + (index * HASH_PTE_SIZE_64) + HASH_PTE_SIZE_64/2; - if (kvmppc_kern_htab || env->external_htab) { + if (env->external_htab) { return ldq_p((const void *)(uintptr_t)addr); } else { return ldq_phys(CPU(cpu)->as, addr);