From patchwork Mon Apr 4 18:31:49 2016
X-Patchwork-Submitter: Hans de Goede
X-Patchwork-Id: 605968
From: Hans de Goede <hdegoede@redhat.com>
To: Albert ARIBAUD
Cc: Ian Campbell, Tom Rini, u-boot@lists.denx.de
Subject: [U-Boot] [PATCH 2/2] arm: Replace v7_maint_dcache_all(ARMV7_DCACHE_INVAL_ALL) with asm code
Date: Mon, 4 Apr 2016 20:31:49 +0200
Message-Id: <1459794709-21601-2-git-send-email-hdegoede@redhat.com>
In-Reply-To: <1459794709-21601-1-git-send-email-hdegoede@redhat.com>
References: <1459794709-21601-1-git-send-email-hdegoede@redhat.com>

Let's be consistent and also replace v7_maint_dcache_all() with asm code
for the invalidate case.
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 arch/arm/cpu/armv7/cache_v7.c     | 100 ++------------------------------------
 arch/arm/cpu/armv7/cache_v7_asm.S |  70 ++++++++++++++++++++++++++
 2 files changed, 74 insertions(+), 96 deletions(-)

diff --git a/arch/arm/cpu/armv7/cache_v7.c b/arch/arm/cpu/armv7/cache_v7.c
index dd07ba1..dc309da 100644
--- a/arch/arm/cpu/armv7/cache_v7.c
+++ b/arch/arm/cpu/armv7/cache_v7.c
@@ -10,15 +10,14 @@
 #include <asm/armv7.h>
 #include <asm/utils.h>
 
-#define ARMV7_DCACHE_INVAL_ALL		1
-#define ARMV7_DCACHE_CLEAN_INVAL_ALL	2
-#define ARMV7_DCACHE_INVAL_RANGE	3
-#define ARMV7_DCACHE_CLEAN_INVAL_RANGE	4
+#define ARMV7_DCACHE_INVAL_RANGE	1
+#define ARMV7_DCACHE_CLEAN_INVAL_RANGE	2
 
 #ifndef CONFIG_SYS_DCACHE_OFF
 
 /* Asm functions from cache_v7_asm.S */
 void v7_flush_dcache_all(void);
+void v7_invalidate_dcache_all(void);
 
 static int check_cache_range(unsigned long start, unsigned long stop)
 {
@@ -37,18 +36,6 @@ static int check_cache_range(unsigned long start, unsigned long stop)
 	return ok;
 }
 
-/*
- * Write the level and type you want to Cache Size Selection Register(CSSELR)
- * to get size details from Current Cache Size ID Register(CCSIDR)
- */
-static void set_csselr(u32 level, u32 type)
-{
-	u32 csselr = level << 1 | type;
-
-	/* Write to Cache Size Selection Register(CSSELR) */
-	asm volatile ("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
-}
-
 static u32 get_ccsidr(void)
 {
 	u32 ccsidr;
@@ -58,85 +45,6 @@ static u32 get_ccsidr(void)
 	return ccsidr;
 }
 
-static u32 get_clidr(void)
-{
-	u32 clidr;
-
-	/* Read current CP15 Cache Level ID Register */
-	asm volatile ("mrc p15,1,%0,c0,c0,1" : "=r" (clidr));
-	return clidr;
-}
-
-static void v7_inval_dcache_level_setway(u32 level, u32 num_sets,
-					 u32 num_ways, u32 way_shift,
-					 u32 log2_line_len)
-{
-	int way, set;
-	u32 setway;
-
-	/*
-	 * For optimal assembly code:
-	 *	a. count down
-	 *	b. have bigger loop inside
-	 */
-	for (way = num_ways - 1; way >= 0 ; way--) {
-		for (set = num_sets - 1; set >= 0; set--) {
-			setway = (level << 1) | (set << log2_line_len) |
-				 (way << way_shift);
-			/* Invalidate data/unified cache line by set/way */
-			asm volatile ("	mcr p15, 0, %0, c7, c6, 2"
-					: : "r" (setway));
-		}
-	}
-	/* DSB to make sure the operation is complete */
-	DSB;
-}
-
-static void v7_maint_dcache_level_setway(u32 level, u32 operation)
-{
-	u32 ccsidr;
-	u32 num_sets, num_ways, log2_line_len, log2_num_ways;
-	u32 way_shift;
-
-	set_csselr(level, ARMV7_CSSELR_IND_DATA_UNIFIED);
-
-	ccsidr = get_ccsidr();
-
-	log2_line_len = ((ccsidr & CCSIDR_LINE_SIZE_MASK) >>
-				CCSIDR_LINE_SIZE_OFFSET) + 2;
-	/* Converting from words to bytes */
-	log2_line_len += 2;
-
-	num_ways = ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >>
-			CCSIDR_ASSOCIATIVITY_OFFSET) + 1;
-	num_sets = ((ccsidr & CCSIDR_NUM_SETS_MASK) >>
-			CCSIDR_NUM_SETS_OFFSET) + 1;
-	/*
-	 * According to ARMv7 ARM number of sets and number of ways need
-	 * not be a power of 2
-	 */
-	log2_num_ways = log_2_n_round_up(num_ways);
-
-	way_shift = (32 - log2_num_ways);
-	v7_inval_dcache_level_setway(level, num_sets, num_ways,
-				     way_shift, log2_line_len);
-}
-
-static void v7_maint_dcache_all(u32 operation)
-{
-	u32 level, cache_type, level_start_bit = 0;
-	u32 clidr = get_clidr();
-
-	for (level = 0; level < 7; level++) {
-		cache_type = (clidr >> level_start_bit) & 0x7;
-		if ((cache_type == ARMV7_CLIDR_CTYPE_DATA_ONLY) ||
-		    (cache_type == ARMV7_CLIDR_CTYPE_INSTRUCTION_DATA) ||
-		    (cache_type == ARMV7_CLIDR_CTYPE_UNIFIED))
-			v7_maint_dcache_level_setway(level, operation);
-		level_start_bit += 3;
-	}
-}
-
 static void v7_dcache_clean_inval_range(u32 start, u32 stop, u32 line_len)
 {
 	u32 mva;
@@ -223,7 +131,7 @@ static void v7_inval_tlb(void)
 
 void invalidate_dcache_all(void)
 {
-	v7_maint_dcache_all(ARMV7_DCACHE_INVAL_ALL);
+	v7_invalidate_dcache_all();
 
 	v7_outer_cache_inval_all();
 }
diff --git a/arch/arm/cpu/armv7/cache_v7_asm.S b/arch/arm/cpu/armv7/cache_v7_asm.S
index 1b1daf8..e47571f 100644
--- a/arch/arm/cpu/armv7/cache_v7_asm.S
+++ b/arch/arm/cpu/armv7/cache_v7_asm.S
@@ -83,3 +83,73 @@ ENTRY(v7_flush_dcache_all)
  THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
 	bx	lr
 ENDPROC(v7_flush_dcache_all)
+
+/*
+ * v7_invalidate_dcache_all()
+ *
+ * Invalidate the whole D-cache.
+ *
+ * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
+ *
+ * Note: copied from __v7_flush_dcache_all above with
+ *	mcr	p15, 0, r11, c7, c14, 2
+ * Replaced with:
+ *	mcr	p15, 0, r11, c7, c6, 2
+ */
+ENTRY(__v7_invalidate_dcache_all)
+	dmb					@ ensure ordering with previous memory accesses
+	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
+	mov	r3, r0, lsr #23			@ move LoC into position
+	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
+	beq	inval_finished			@ if loc is 0, then no need to clean
+	mov	r10, #0				@ start clean at cache level 0
+inval_levels:
+	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
+	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
+	and	r1, r1, #7			@ mask of the bits for current cache only
+	cmp	r1, #2				@ see what cache we have at this level
+	blt	inval_skip			@ skip if no cache, or just i-cache
+	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	isb					@ isb to sych the new cssr&csidr
+	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
+	and	r2, r1, #7			@ extract the length of the cache lines
+	add	r2, r2, #4			@ add 4 (line length offset)
+	movw	r4, #0x3ff
+	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
+	clz	r5, r4				@ find bit position of way size increment
+	movw	r7, #0x7fff
+	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
+inval_loop1:
+	mov	r9, r7				@ create working copy of max index
+inval_loop2:
+ ARM(	orr	r11, r10, r4, lsl r5	)	@ factor way and cache number into r11
+ THUMB(	lsl	r6, r4, r5		)
+ THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
+ ARM(	orr	r11, r11, r9, lsl r2	)	@ factor index number into r11
+ THUMB(	lsl	r6, r9, r2		)
+ THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
+	mcr	p15, 0, r11, c7, c6, 2		@ invalidate by set/way
+	subs	r9, r9, #1			@ decrement the index
+	bge	inval_loop2
+	subs	r4, r4, #1			@ decrement the way
+	bge	inval_loop1
+inval_skip:
+	add	r10, r10, #2			@ increment cache number
+	cmp	r3, r10
+	bgt	inval_levels
+inval_finished:
+	mov	r10, #0				@ swith back to cache level 0
+	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	dsb	st
+	isb
+	bx	lr
+ENDPROC(__v7_invalidate_dcache_all)
+
+ENTRY(v7_invalidate_dcache_all)
+ ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
+	bl	__v7_invalidate_dcache_all
+ ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
+	bx	lr
+ENDPROC(v7_invalidate_dcache_all)
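
For reference: both the removed C loop and the new __v7_invalidate_dcache_all walk the cache levels reported by CLIDR, read each level's geometry from CCSIDR, and issue DCISW ("mcr p15, 0, <Rt>, c7, c6, 2") once per set/way combination. The sketch below is not part of the patch; it is a host-compilable C illustration of how that set/way operand is packed from the CCSIDR fields. The helper names, the example cache geometry (32 KiB, 4-way, 64-byte lines), and the printed values are invented for the illustration.

/*
 * Illustration only (not U-Boot code): pack a DCISW operand the same way
 * the removed v7_inval_dcache_level_setway()/v7_maint_dcache_level_setway()
 * pair did:
 *
 *   operand = (way << way_shift) | (set << log2_line_len) | (level << 1)
 *
 * CCSIDR fields: LineSize = bits [2:0], Associativity = bits [12:3],
 * NumSets = bits [27:13].  On real hardware each operand would be written
 * with "mcr p15, 0, <Rt>, c7, c6, 2".
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Equivalent of U-Boot's log_2_n_round_up() for this example. */
static uint32_t log2_round_up(uint32_t n)
{
	uint32_t log = 0;

	while ((1u << log) < n)
		log++;
	return log;
}

static uint32_t dcisw_operand(uint32_t level, uint32_t way, uint32_t set,
			      uint32_t ccsidr)
{
	/* LineSize is log2(words per line) - 2; +2 more converts to bytes. */
	uint32_t log2_line_len = (ccsidr & 0x7) + 2 + 2;
	uint32_t num_ways = ((ccsidr >> 3) & 0x3ff) + 1;
	uint32_t way_shift = 32 - log2_round_up(num_ways);

	return (way << way_shift) | (set << log2_line_len) | (level << 1);
}

int main(void)
{
	/*
	 * Example CCSIDR for a 32 KiB, 4-way, 64-byte-line D-cache:
	 * LineSize = 2 (16 words), Associativity = 3 (4 ways), NumSets = 127.
	 */
	uint32_t ccsidr = (127u << 13) | (3u << 3) | 2u;

	printf("level 0, way 3, set 127 -> 0x%08" PRIx32 "\n",
	       dcisw_operand(0, 3, 127, ccsidr));
	printf("level 0, way 0, set 0   -> 0x%08" PRIx32 "\n",
	       dcisw_operand(0, 0, 0, ccsidr));
	return 0;
}

The assembly derives the same field widths directly from CCSIDR: "clz r5, r4" on the maximum way index plays the role of 32 - log_2_n_round_up(num_ways), and "add r2, r2, #4" is the words-to-bytes line-length shift.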