From patchwork Fri Nov  6 22:41:39 2009
From: Thomas Gleixner
To: LKML
Cc: Ingo Molnar, Peter Zijlstra, "David S. Miller", sparclinux@vger.kernel.org
Subject: [patch 08/16] sparc: Make atomic locks raw
Date: Fri, 06 Nov 2009 22:41:39 -0000
Message-Id: <20091106223806.721945005@linutronix.de>
References: <20091106223547.784916750@linutronix.de>
Content-Disposition: inline; filename=sparc-make-atomic-locks-raw.patch

SPIN_LOCK_UNLOCKED is deprecated. The locks which protect the atomic
operations have no dependency on other locks, and the code is well
tested, so the conversion to raw locks is safe. Make the lock array
static while at it.

Signed-off-by: Thomas Gleixner
Cc: David S. Miller
Cc: sparclinux@vger.kernel.org
---
 arch/sparc/lib/atomic32.c |   36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

Index: linux-2.6/arch/sparc/lib/atomic32.c
===================================================================
--- linux-2.6.orig/arch/sparc/lib/atomic32.c
+++ linux-2.6/arch/sparc/lib/atomic32.c
@@ -15,8 +15,8 @@
 #define ATOMIC_HASH_SIZE	4
 #define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
 
-spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
-	[0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
+static raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
+	[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
 };
 
 #else /* SMP */
@@ -31,11 +31,11 @@ int __atomic_add_return(int i, atomic_t
 {
 	int ret;
 	unsigned long flags;
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 
 	ret = (v->counter += i);
 
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret;
 }
 EXPORT_SYMBOL(__atomic_add_return);
@@ -45,12 +45,12 @@ int atomic_cmpxchg(atomic_t *v, int old,
 {
 	int ret;
 	unsigned long flags;
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 	ret = v->counter;
 	if (likely(ret == old))
 		v->counter = new;
 
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret;
 }
 EXPORT_SYMBOL(atomic_cmpxchg);
@@ -60,11 +60,11 @@ int atomic_add_unless(atomic_t *v, int a
 {
 	int ret;
 	unsigned long flags;
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 	ret = v->counter;
 	if (ret != u)
 		v->counter += a;
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret != u;
 }
 EXPORT_SYMBOL(atomic_add_unless);
@@ -74,9 +74,9 @@ void atomic_set(atomic_t *v, int i)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 	v->counter = i;
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 }
 EXPORT_SYMBOL(atomic_set);
 
@@ -84,10 +84,10 @@ unsigned long ___set_bit(unsigned long *
 {
 	unsigned long old, flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
 	old = *addr;
 	*addr = old | mask;
-	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
 
 	return old & mask;
 }
@@ -97,10 +97,10 @@ unsigned long ___clear_bit(unsigned long
 {
 	unsigned long old, flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
 	old = *addr;
 	*addr = old & ~mask;
-	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
 
 	return old & mask;
 }
@@ -110,10 +110,10 @@ unsigned long ___change_bit(unsigned lon
 {
 	unsigned long old, flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
 	old = *addr;
 	*addr = old ^ mask;
-	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
 
 	return old & mask;
 }
@@ -124,10 +124,10 @@ unsigned long __cmpxchg_u32(volatile u32
 	unsigned long flags;
 	u32 prev;
 
-	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
 	if ((prev = *ptr) == old)
 		*ptr = new;
-	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
 
 	return (unsigned long)prev;
 }
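
For context on the scheme this patch touches: sparc32 SMP has no atomic
read-modify-write instruction that covers a full 32-bit word, so
atomic32.c emulates atomic_t and the bitops by taking one of a small
array of spinlocks, selected by hashing the address of the variable.
The sketch below shows the same idea as a standalone user-space C
program; the pthread mutexes and the atomic_add_return() wrapper stand
in for the kernel's raw spinlocks and are illustrative assumptions, not
the kernel API.

/*
 * Minimal user-space sketch of the hashed-lock emulation used by
 * arch/sparc/lib/atomic32.c.  Build with: gcc -pthread sketch.c
 * (the [0 ... N] range initializer is a GCC extension, as in the
 * kernel source).
 */
#include <pthread.h>
#include <stdio.h>

#define ATOMIC_HASH_SIZE 4
/* >>8 drops the low address bits, so words in the same 256-byte block
 * share a lock, while unrelated addresses spread across the array. */
#define ATOMIC_HASH(a) \
	(&atomic_hash[(((unsigned long)(a)) >> 8) & (ATOMIC_HASH_SIZE - 1)])

typedef struct { int counter; } atomic_t;

static pthread_mutex_t atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... ATOMIC_HASH_SIZE - 1] = PTHREAD_MUTEX_INITIALIZER
};

/* Emulated atomic add-and-return: every access to v->counter goes
 * through the lock its address hashes to, so concurrent updates of the
 * same atomic_t always contend on the same lock and serialize. */
static int atomic_add_return(int i, atomic_t *v)
{
	int ret;

	pthread_mutex_lock(ATOMIC_HASH(v));
	ret = (v->counter += i);
	pthread_mutex_unlock(ATOMIC_HASH(v));
	return ret;
}

int main(void)
{
	atomic_t v = { 0 };

	printf("%d\n", atomic_add_return(5, &v));	/* prints 5 */
	printf("%d\n", atomic_add_return(3, &v));	/* prints 8 */
	return 0;
}

With only four locks the scheme trades some false sharing for a tiny
footprint. The point of converting the kernel's array to raw locks is
that the raw variants remain true spinning locks even in configurations
(such as preempt-rt) where plain spinlock_t is converted to a sleeping
lock, and these emulated atomics are called from contexts that must
never sleep.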