diff mbox series

[v3,4/5] arch,locking/atomic: hexagon: add arch_cmpxchg[64]_local

Message ID 20231121142347.241356-5-wuqiang.matt@bytedance.com
State New
Headers show
Series arch,locking/atomic: add arch_cmpxchg[64]_local | expand

Commit Message

wuqiang.matt Nov. 21, 2023, 2:23 p.m. UTC
hexagon hasn't arch_cmpxchg_local implemented, which causes
build failures for any references to try_cmpxchg_local,
reported by the kernel test robot.

This patch implements arch_cmpxchg[64]_local with the native
cmpxchg variant if the corresponding data size is supported,
otherwise generic_cmpxchg[64]_local is to be used.

Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202310272207.tLPflya4-lkp@intel.com/

Signed-off-by: wuqiang.matt <wuqiang.matt@bytedance.com>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
---
 arch/hexagon/include/asm/cmpxchg.h | 51 +++++++++++++++++++++++++++++-
 1 file changed, 50 insertions(+), 1 deletion(-)

Comments

Brian Cain Nov. 22, 2023, 4:55 p.m. UTC | #1
> -----Original Message-----
> From: wuqiang.matt <wuqiang.matt@bytedance.com>
> Sent: Tuesday, November 21, 2023 8:24 AM
> To: ubizjak@gmail.com; mark.rutland@arm.com; vgupta@kernel.org; Brian
> Cain <bcain@quicinc.com>; jonas@southpole.se;
> stefan.kristiansson@saunalahti.fi; shorne@gmail.com; chris@zankel.net;
> jcmvbkbc@gmail.com; geert@linux-m68k.org; andi.shyti@linux.intel.com;
> mingo@kernel.org; palmer@rivosinc.com; andrzej.hajda@intel.com;
> arnd@arndb.de; peterz@infradead.org; mhiramat@kernel.org
> Cc: linux-arch@vger.kernel.org; linux-snps-arc@lists.infradead.org; linux-
> kernel@vger.kernel.org; linux-hexagon@vger.kernel.org; linux-
> openrisc@vger.kernel.org; linux-trace-kernel@vger.kernel.org;
> mattwu@163.com; linux@roeck-us.net; wuqiang.matt
> <wuqiang.matt@bytedance.com>; kernel test robot <lkp@intel.com>
> Subject: [PATCH v3 4/5] arch,locking/atomic: hexagon: add
> arch_cmpxchg[64]_local
> 
> WARNING: This email originated from outside of Qualcomm. Please be wary of
> any links or attachments, and do not enable macros.
> 
> hexagon hasn't arch_cmpxchg_local implemented, which causes
> build failures for any references to try_cmpxchg_local,
> reported by the kernel test robot.
> 
> This patch implements arch_cmpxchg[64]_local with the native
> cmpxchg variant if the corresponding data size is supported,
> otherwise generic_cmpxchg[64]_local is to be used.
> 
> Reported-by: kernel test robot <lkp@intel.com>
> Closes: https://lore.kernel.org/oe-kbuild-all/202310272207.tLPflya4-
> lkp@intel.com/
> 
> Signed-off-by: wuqiang.matt <wuqiang.matt@bytedance.com>
> Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> ---
>  arch/hexagon/include/asm/cmpxchg.h | 51 +++++++++++++++++++++++++++++-
>  1 file changed, 50 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/hexagon/include/asm/cmpxchg.h
> b/arch/hexagon/include/asm/cmpxchg.h
> index bf6cf5579cf4..302fa30f25aa 100644
> --- a/arch/hexagon/include/asm/cmpxchg.h
> +++ b/arch/hexagon/include/asm/cmpxchg.h
> @@ -8,6 +8,8 @@
>  #ifndef _ASM_CMPXCHG_H
>  #define _ASM_CMPXCHG_H
> 
> +#include <linux/build_bug.h>
> +
>  /*
>   * __arch_xchg - atomically exchange a register and a memory location
>   * @x: value to swap
> @@ -51,13 +53,15 @@ __arch_xchg(unsigned long x, volatile void *ptr, int
> size)
>   *  variable casting.
>   */
> 
> -#define arch_cmpxchg(ptr, old, new)                            \
> +#define __cmpxchg_32(ptr, old, new)                            \
>  ({                                                             \
>         __typeof__(ptr) __ptr = (ptr);                          \
>         __typeof__(*(ptr)) __old = (old);                       \
>         __typeof__(*(ptr)) __new = (new);                       \
>         __typeof__(*(ptr)) __oldval = 0;                        \
>                                                                 \
> +       BUILD_BUG_ON(sizeof(*(ptr)) != 4);                      \
> +                                                               \
>         asm volatile(                                           \
>                 "1:     %0 = memw_locked(%1);\n"                \
>                 "       { P0 = cmp.eq(%0,%2);\n"                \
> @@ -72,4 +76,49 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
>         __oldval;                                               \
>  })
> 
> +#define __cmpxchg(ptr, old, val, size)                         \
> +({                                                             \
> +       __typeof__(*(ptr)) oldval;                              \
> +                                                               \
> +       switch (size) {                                         \
> +       case 4:                                                 \
> +               oldval = __cmpxchg_32(ptr, old, val);           \
> +               break;                                          \
> +       default:                                                \
> +               BUILD_BUG();                                    \
> +               oldval = val;                                   \
> +               break;                                          \
> +       }                                                       \
> +                                                               \
> +       oldval;                                                 \
> +})
> +
> +#define arch_cmpxchg(ptr, o, n)        __cmpxchg((ptr), (o), (n), sizeof(*(ptr)))
> +
> +/*
> + * always make arch_cmpxchg[64]_local available, native cmpxchg
> + * will be used if available, then generic_cmpxchg[64]_local
> + */
> +#include <asm-generic/cmpxchg-local.h>
> +
> +#define arch_cmpxchg_local(ptr, old, val)                      \
> +({                                                             \
> +       __typeof__(*(ptr)) __retval;                            \
> +       int __size = sizeof(*(ptr));                            \
> +                                                               \
> +       switch (__size) {                                       \
> +       case 4:                                                 \
> +               __retval = __cmpxchg_32(ptr, old, val);         \
> +               break;                                          \
> +       default:                                                \
> +               __retval = __generic_cmpxchg_local(ptr, old,    \
> +                                               val, __size);   \
> +               break;                                          \
> +       }                                                       \
> +                                                               \
> +       __retval;                                               \
> +})
> +
> +#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr),
> (o), (n))
> +
>  #endif /* _ASM_CMPXCHG_H */
> --
> 2.40.1

Acked-by: Brian Cain <bcain@quicinc.com>
diff mbox series

Patch

diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index bf6cf5579cf4..302fa30f25aa 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -8,6 +8,8 @@ 
 #ifndef _ASM_CMPXCHG_H
 #define _ASM_CMPXCHG_H
 
+#include <linux/build_bug.h>
+
 /*
  * __arch_xchg - atomically exchange a register and a memory location
  * @x: value to swap
@@ -51,13 +53,15 @@  __arch_xchg(unsigned long x, volatile void *ptr, int size)
  *  variable casting.
  */
 
-#define arch_cmpxchg(ptr, old, new)				\
+#define __cmpxchg_32(ptr, old, new)				\
 ({								\
 	__typeof__(ptr) __ptr = (ptr);				\
 	__typeof__(*(ptr)) __old = (old);			\
 	__typeof__(*(ptr)) __new = (new);			\
 	__typeof__(*(ptr)) __oldval = 0;			\
 								\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 4);			\
+								\
 	asm volatile(						\
 		"1:	%0 = memw_locked(%1);\n"		\
 		"	{ P0 = cmp.eq(%0,%2);\n"		\
@@ -72,4 +76,49 @@  __arch_xchg(unsigned long x, volatile void *ptr, int size)
 	__oldval;						\
 })
 
+#define __cmpxchg(ptr, old, val, size)				\
+({								\
+	__typeof__(*(ptr)) oldval;				\
+								\
+	switch (size) {						\
+	case 4:							\
+		oldval = __cmpxchg_32(ptr, old, val);		\
+		break;						\
+	default:						\
+		BUILD_BUG();					\
+		oldval = val;					\
+		break;						\
+	}							\
+								\
+	oldval;							\
+})
+
+#define arch_cmpxchg(ptr, o, n)	__cmpxchg((ptr), (o), (n), sizeof(*(ptr)))
+
+/*
+ * always make arch_cmpxchg[64]_local available, native cmpxchg
+ * will be used if available, then generic_cmpxchg[64]_local
+ */
+#include <asm-generic/cmpxchg-local.h>
+
+#define arch_cmpxchg_local(ptr, old, val)			\
+({								\
+	__typeof__(*(ptr)) __retval;				\
+	int __size = sizeof(*(ptr));				\
+								\
+	switch (__size) {					\
+	case 4:							\
+		__retval = __cmpxchg_32(ptr, old, val);		\
+		break;						\
+	default:						\
+		__retval = __generic_cmpxchg_local(ptr, old,	\
+						val, __size);	\
+		break;						\
+	}							\
+								\
+	__retval;						\
+})
+
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+
 #endif /* _ASM_CMPXCHG_H */