From patchwork Tue Feb 15 04:56:23 2011 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: qemu@gibson.dropbear.id.au X-Patchwork-Id: 83183 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from lists.gnu.org (lists.gnu.org [199.232.76.165]) (using TLSv1 with cipher DHE-RSA-AES256-SHA (256/256 bits)) (Client did not present a certificate) by ozlabs.org (Postfix) with ESMTPS id 26FBFB7102 for ; Tue, 15 Feb 2011 16:20:55 +1100 (EST) Received: from localhost ([127.0.0.1]:55041 helo=lists.gnu.org) by lists.gnu.org with esmtp (Exim 4.43) id 1PpDJx-0001Mj-BG for incoming@patchwork.ozlabs.org; Tue, 15 Feb 2011 00:19:29 -0500 Received: from [140.186.70.92] (port=53460 helo=eggs.gnu.org) by lists.gnu.org with esmtp (Exim 4.43) id 1PpCyJ-0008Vq-VV for qemu-devel@nongnu.org; Mon, 14 Feb 2011 23:57:10 -0500 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1PpCyH-0001gO-GB for qemu-devel@nongnu.org; Mon, 14 Feb 2011 23:57:07 -0500 Received: from ozlabs.org ([203.10.76.45]:34736) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1PpCyG-0001aK-QW for qemu-devel@nongnu.org; Mon, 14 Feb 2011 23:57:05 -0500 Received: by ozlabs.org (Postfix, from userid 1007) id 8C99CB712D; Tue, 15 Feb 2011 15:56:53 +1100 (EST) From: qemu@gibson.dropbear.id.au To: qemu-devel@nongnu.org Date: Tue, 15 Feb 2011 15:56:23 +1100 Message-Id: <1297745799-26148-13-git-send-email-qemu@gibson.dropbear.id.au> X-Mailer: git-send-email 1.7.1 In-Reply-To: <1297745799-26148-1-git-send-email-qemu@gibson.dropbear.id.au> References: <1297745799-26148-1-git-send-email-qemu@gibson.dropbear.id.au> X-detected-operating-system: by eggs.gnu.org: GNU/Linux 2.6 (newer, 3) X-Received-From: 203.10.76.45 Cc: paulus@samba.org, agraf@suse.de, anton@samba.org Subject: [Qemu-devel] [PATCH 12/28] Support 1T segments on ppc X-BeenThere: 
qemu-devel@nongnu.org X-Mailman-Version: 2.1.5 Precedence: list List-Id: qemu-devel.nongnu.org List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Sender: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Errors-To: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org From: David Gibson Traditionally, the "segments" used for the two-stage translation used on powerpc MMUs were 256MB in size. This was the only option on all hash page table based 32-bit powerpc cpus, and on the earlier 64-bit hash page table based cpus. However, newer 64-bit cpus also permit 1TB segments. This patch adds support for 1TB segment translation to the qemu code. Signed-off-by: David Gibson --- target-ppc/cpu.h | 7 ++++++ target-ppc/helper.c | 54 +++++++++++++++++++++++++++++++++++--------------- 2 files changed, 45 insertions(+), 16 deletions(-) diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h index 71f8d72..9abf4a9 100644 --- a/target-ppc/cpu.h +++ b/target-ppc/cpu.h @@ -114,6 +114,7 @@ enum powerpc_mmu_t { POWERPC_MMU_601 = 0x0000000A, #if defined(TARGET_PPC64) #define POWERPC_MMU_64 0x00010000 +#define POWERPC_MMU_1TSEG 0x00020000 /* 64 bits PowerPC MMU */ POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001, /* 620 variant (no segment exceptions) */ @@ -382,9 +383,11 @@ struct ppc_slb_t { /* Bits in the SLB VSID word */ #define SLB_VSID_SHIFT 12 +#define SLB_VSID_SHIFT_1T 24 #define SLB_VSID_SSIZE_SHIFT 62 #define SLB_VSID_B 0xc000000000000000ULL #define SLB_VSID_B_256M 0x0000000000000000ULL +#define SLB_VSID_B_1T 0x4000000000000000ULL #define SLB_VSID_VSID 0x3FFFFFFFFFFFF000ULL #define SLB_VSID_PTEM (SLB_VSID_B | SLB_VSID_VSID) #define SLB_VSID_KS 0x0000000000000800ULL @@ -398,6 +401,10 @@ struct ppc_slb_t { #define SEGMENT_SHIFT_256M 28 #define SEGMENT_MASK_256M ~((1ULL << SEGMENT_SHIFT_256M) - 1) +#define SEGMENT_SHIFT_1T 40 +#define SEGMENT_MASK_1T ~((1ULL << SEGMENT_SHIFT_1T) - 1) + + 
/*****************************************************************************/ /* Machine state register bits definition */ #define MSR_SF 63 /* Sixty-four-bit mode hflags */ diff --git a/target-ppc/helper.c b/target-ppc/helper.c index 111675d..3e3b5da 100644 --- a/target-ppc/helper.c +++ b/target-ppc/helper.c @@ -671,19 +671,26 @@ static inline int find_pte(CPUState *env, mmu_ctx_t *ctx, int h, int rw, #if defined(TARGET_PPC64) static inline ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr) { - uint64_t esid; + uint64_t esid_256M, esid_1T; int n; LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr); - esid = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V; + esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V; + esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V; for (n = 0; n < env->slb_nr; n++) { ppc_slb_t *slb = &env->slb[n]; LOG_SLB("%s: slot %d %016" PRIx64 " %016" PRIx64 "\n", __func__, n, slb->esid, slb->vsid); - if (slb->esid == esid) { + /* We check for 1T matches on all MMUs here - if the MMU + * doesn't have 1T segment support, we will have prevented 1T + * entries from being inserted in the slbmte code. 
*/ + if ( ((slb->esid == esid_256M) && + ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M)) + || ((slb->esid == esid_1T) && + ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T)) ) { return slb; } } @@ -736,16 +743,19 @@ void ppc_slb_invalidate_one (CPUPPCState *env, uint64_t T0) int ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs) { int slot = rb & 0xfff; - uint64_t esid = rb & ~0xfff; ppc_slb_t *slb = &env->slb[slot]; - - if (slot >= env->slb_nr) { - return -1; - } - - slb->esid = esid; + + if (rb & (0x1000 - env->slb_nr)) + return -1; /* Reserved bits set or slot too high */ + if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) + return -1; /* Bad segment size */ + if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) + return -1; /* 1T segment on MMU that doesn't support it */ + + /* Mask out the slot number as we store the entry */ + slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V); slb->vsid = rs; - + LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, rb, rs, slb->esid, slb->vsid); @@ -795,6 +805,7 @@ static inline int get_segment(CPUState *env, mmu_ctx_t *ctx, if (env->mmu_model & POWERPC_MMU_64) { ppc_slb_t *slb; target_ulong pageaddr; + int segment_bits; LOG_MMU("Check SLBs\n"); slb = slb_lookup(env, eaddr); @@ -802,7 +813,14 @@ static inline int get_segment(CPUState *env, mmu_ctx_t *ctx, return -5; } - vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; + if (slb->vsid & SLB_VSID_B) { + vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T; + segment_bits = 40; + } else { + vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; + segment_bits = 28; + } + target_page_bits = (slb->vsid & SLB_VSID_L) ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS; ctx->key = !!(pr ? 
(slb->vsid & SLB_VSID_KP) @@ -810,11 +828,15 @@ static inline int get_segment(CPUState *env, mmu_ctx_t *ctx, ds = 0; ctx->nx = !!(slb->vsid & SLB_VSID_N); - pageaddr = eaddr & ((1ULL << 28) - (1ULL << target_page_bits)); - /* XXX: this is false for 1 TB segments */ - hash = vsid ^ (pageaddr >> target_page_bits); + pageaddr = eaddr & ((1ULL << segment_bits) + - (1ULL << target_page_bits)); + if (slb->vsid & SLB_VSID_B) + hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits); + else + hash = vsid ^ (pageaddr >> target_page_bits); /* Only 5 bits of the page index are used in the AVPN */ - ctx->ptem = (slb->vsid & SLB_VSID_PTEM) | ((pageaddr >> 16) & 0x0F80); + ctx->ptem = (slb->vsid & SLB_VSID_PTEM) | + ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80)); } else #endif /* defined(TARGET_PPC64) */ {