[3/4] sparc32: Add atomic bitops support using CAS

Message ID 20231229200825.GC4034411@ravnborg.org
State New
Series sparc32: Use CAS for atomic support

Commit Message

Sam Ravnborg Dec. 29, 2023, 8:08 p.m. UTC
This implements the atomic bit operations using the CAS instruction, so they
are truly atomic instead of being emulated with a hashed spinlock.

The implementation uses a single asm helper to keep the code as readable as
possible. The operations are implemented inline in bitops/atomic_32.h,
mirroring the structure used in asm-generic (bitops/atomic.h).
As an added benefit the bitops can be instrumented.
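
All six operations share the same pattern; in sketch form (this restates the
code in the patch below, it is not additional code):

    do {
        check = *p;              /* snapshot the 32-bit word */
        swap = check | mask;     /* the actual bit operation, here set_bit */
    } while (__boa_casa(p, check, swap) != check);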

The generated code is more compact with the majority implemented in C, as
this allows the compiler to optimize the code, especially when the arguments
passed are constant.
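
For illustration only (not part of the patch; demo_flags and demo_set_ready
are hypothetical names), a call with a constant bit number lets BIT_MASK()
and BIT_WORD() fold at compile time, leaving just the CASA retry loop:

    static unsigned long demo_flags;

    static void demo_set_ready(void)
    {
        /* nr = 5 is constant: mask 0x20 and word index 0 fold away */
        set_bit(5, &demo_flags);
    }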

The old spinlock-based emulation of the bitops is no longer used and is
deleted.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Arnd Bergmann <arnd@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
---
 arch/sparc/include/asm/bitops/atomic_32.h | 124 ++++++++++++++++++++++
 arch/sparc/include/asm/bitops_32.h        |  71 +------------
 arch/sparc/lib/atomic32.c                 |  39 -------
 3 files changed, 125 insertions(+), 109 deletions(-)
 create mode 100644 arch/sparc/include/asm/bitops/atomic_32.h

Patch

diff --git a/arch/sparc/include/asm/bitops/atomic_32.h b/arch/sparc/include/asm/bitops/atomic_32.h
new file mode 100644
index 000000000000..b9e33d21b58d
--- /dev/null
+++ b/arch/sparc/include/asm/bitops/atomic_32.h
@@ -0,0 +1,124 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SPARC_BITOPS_ATOMIC_H_
+#define __ASM_SPARC_BITOPS_ATOMIC_H_
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+
+#include <asm/asi.h>
+#include <asm/barrier.h>
+
+static __always_inline
+int __boa_casa(volatile unsigned long *p,
+	       unsigned long check,
+	       unsigned long swap)
+{
+	// casa [p], check, swap
+	// check == swap for success, otherwise try again
+	asm volatile("casa      [%2] 0xb, %3, %0"
+		     : "=&r" (swap)
+		     : "0" (swap), "r" (p), "r" (check)
+		     : "memory");
+
+	return swap;
+}
+
+static __always_inline void
+arch_set_bit(unsigned int nr, volatile unsigned long *p)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long check;
+	unsigned long swap;
+
+	p += BIT_WORD(nr);
+
+	do {
+		check = *p;
+		swap = check | mask;
+	} while (__boa_casa(p, check, swap) != check);
+}
+
+static __always_inline void
+arch_clear_bit(unsigned int nr, volatile unsigned long *p)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long check;
+	unsigned long swap;
+
+	p += BIT_WORD(nr);
+
+	do {
+		check = *p;
+		swap = check & ~mask;
+	} while (__boa_casa(p, check, swap) != check);
+}
+
+static __always_inline void
+arch_change_bit(unsigned int nr, volatile unsigned long *p)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long check;
+	unsigned long swap;
+
+	p += BIT_WORD(nr);
+
+	do {
+		check = *p;
+		swap = check ^ mask;
+	} while (__boa_casa(p, check, swap) != check);
+}
+
+static __always_inline int
+arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long check;
+	unsigned long swap;
+
+	p += BIT_WORD(nr);
+
+	do {
+		check = *p;
+		swap = check | mask;
+	} while (__boa_casa(p, check, swap) != check);
+
+	return !!(check & mask);
+}
+
+static __always_inline int
+arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long check;
+	unsigned long swap;
+
+	p += BIT_WORD(nr);
+
+	do {
+		check = *p;
+		swap = check & ~mask;
+	} while (__boa_casa(p, check, swap) != check);
+
+	return !!(check & mask);
+}
+
+static __always_inline int
+arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long check;
+	unsigned long swap;
+
+	p += BIT_WORD(nr);
+
+	do {
+		check = *p;
+		swap = check ^ mask;
+	} while (__boa_casa(p, check, swap) != check);
+
+	return !!(check & mask);
+}
+
+#include <asm-generic/bitops/instrumented-atomic.h>
+
+#endif /* __ASM_SPARC_BITOPS_ATOMIC_H_ */
diff --git a/arch/sparc/include/asm/bitops_32.h b/arch/sparc/include/asm/bitops_32.h
index 3448c191b484..34279e9572a4 100644
--- a/arch/sparc/include/asm/bitops_32.h
+++ b/arch/sparc/include/asm/bitops_32.h
@@ -19,76 +19,7 @@ 
 #error only <linux/bitops.h> can be included directly
 #endif
 
-unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask);
-unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask);
-unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask);
-
-/*
- * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
- * is in the highest of the four bytes and bit '31' is the high bit
- * within the first byte. Sparc is BIG-Endian. Unless noted otherwise
- * all bit-ops return 0 if bit was previously clear and != 0 otherwise.
- */
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
-{
-	unsigned long *ADDR, mask;
-
-	ADDR = ((unsigned long *) addr) + (nr >> 5);
-	mask = 1 << (nr & 31);
-
-	return sp32___set_bit(ADDR, mask) != 0;
-}
-
-static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
-{
-	unsigned long *ADDR, mask;
-
-	ADDR = ((unsigned long *) addr) + (nr >> 5);
-	mask = 1 << (nr & 31);
-
-	(void) sp32___set_bit(ADDR, mask);
-}
-
-static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
-{
-	unsigned long *ADDR, mask;
-
-	ADDR = ((unsigned long *) addr) + (nr >> 5);
-	mask = 1 << (nr & 31);
-
-	return sp32___clear_bit(ADDR, mask) != 0;
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
-{
-	unsigned long *ADDR, mask;
-
-	ADDR = ((unsigned long *) addr) + (nr >> 5);
-	mask = 1 << (nr & 31);
-
-	(void) sp32___clear_bit(ADDR, mask);
-}
-
-static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
-{
-	unsigned long *ADDR, mask;
-
-	ADDR = ((unsigned long *) addr) + (nr >> 5);
-	mask = 1 << (nr & 31);
-
-	return sp32___change_bit(ADDR, mask) != 0;
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
-{
-	unsigned long *ADDR, mask;
-
-	ADDR = ((unsigned long *) addr) + (nr >> 5);
-	mask = 1 << (nr & 31);
-
-	(void) sp32___change_bit(ADDR, mask);
-}
-
+#include <asm/bitops/atomic_32.h>
 #include <asm-generic/bitops/non-atomic.h>
 
 #include <asm-generic/bitops/ffz.h>
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index f378471adeca..ed778f7ebe97 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -119,42 +119,3 @@  void arch_atomic_set(atomic_t *v, int i)
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 }
 EXPORT_SYMBOL(arch_atomic_set);
-
-unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask)
-{
-	unsigned long old, flags;
-
-	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
-	old = *addr;
-	*addr = old | mask;
-	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
-
-	return old & mask;
-}
-EXPORT_SYMBOL(sp32___set_bit);
-
-unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask)
-{
-	unsigned long old, flags;
-
-	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
-	old = *addr;
-	*addr = old & ~mask;
-	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
-
-	return old & mask;
-}
-EXPORT_SYMBOL(sp32___clear_bit);
-
-unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
-{
-	unsigned long old, flags;
-
-	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
-	old = *addr;
-	*addr = old ^ mask;
-	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
-
-	return old & mask;
-}
-EXPORT_SYMBOL(sp32___change_bit);