
[v4,1/7] kcsan: Add atomic builtin stubs for 32-bit systems

Message ID 20230208032202.1357949-2-rmclure@linux.ibm.com (mailing list archive)
State Changes Requested
Series powerpc: Add KCSAN Support

Commit Message

Rohan McLure Feb. 8, 2023, 3:21 a.m. UTC
KCSAN instruments calls to atomic builtins, and will in turn call these
builtins itself. As such, architectures supporting KCSAN must have
compiler support for these atomic primitives.

Since 32-bit systems are unlikely to have 64-bit compiler builtins,
provide a stub for each missing builtin, and use BUG() to assert
unreachability.

In commit 725aea873261 ("xtensa: enable KCSAN"), xtensa implements these
locally, but does not advertise the fact with preprocessor macros. To
avoid production of duplicate symbols, do not build the stubs on xtensa.
A future patch will remove the xtensa implementation of these stubs.

Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
---
v4: New patch
---
 kernel/kcsan/Makefile |  3 ++
 kernel/kcsan/stubs.c  | 78 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 81 insertions(+)
 create mode 100644 kernel/kcsan/stubs.c
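
To make the failure mode concrete: KCSAN's runtime implements the
instrumented atomics in terms of compiler builtins, and on a 32-bit target
the compiler lowers the 8-byte forms to out-of-line library calls. A minimal
sketch in illustrative C (not part of the patch; the function name is made
up):

#include <stdint.h>

/* Roughly what KCSAN's runtime does for a 64-bit atomic load. On a
 * 32-bit target without native 8-byte atomics, the compiler lowers
 * this builtin to a call to __atomic_load_8(), normally provided by
 * libatomic -- which the kernel does not link, hence the BUG() stubs. */
uint64_t instrumented_load64(const volatile uint64_t *p, int memorder)
{
	return __atomic_load_n(p, memorder);
}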

Comments

Max Filippov Feb. 8, 2023, 4:23 a.m. UTC | #1
On Tue, Feb 7, 2023 at 7:22 PM Rohan McLure <rmclure@linux.ibm.com> wrote:
>
> KCSAN instruments calls to atomic builtins, and will in turn call these
> builtins itself. As such, architectures supporting KCSAN must have
> compiler support for these atomic primitives.
>
> Since 32-bit systems are unlikely to have 64-bit compiler builtins,
> provide a stub for each missing builtin, and use BUG() to assert
> unreachability.
>
> In commit 725aea873261 ("xtensa: enable KCSAN"), xtensa implements these
> locally, but does not advertise the fact with preprocessor macros. To
> avoid production of duplicate symbols, do not build the stubs on xtensa.
> A future patch will remove the xtensa implementation of these stubs.
>
> Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
> ---
> v4: New patch
> ---
>  kernel/kcsan/Makefile |  3 ++
>  kernel/kcsan/stubs.c  | 78 +++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 81 insertions(+)
>  create mode 100644 kernel/kcsan/stubs.c

Reviewed-by: Max Filippov <jcmvbkbc@gmail.com>
Christophe Leroy Feb. 8, 2023, 12:23 p.m. UTC | #2
On 08/02/2023 at 04:21, Rohan McLure wrote:
> KCSAN instruments calls to atomic builtins, and will in turn call these
> builtins itself. As such, architectures supporting KCSAN must have
> compiler support for these atomic primitives.
> 
> Since 32-bit systems are unlikely to have 64-bit compiler builtins,
> provide a stub for each missing builtin, and use BUG() to assert
> unreachability.
> 
> In commit 725aea873261 ("xtensa: enable KCSAN"), xtensa implements these
> locally, but does not advertise the fact with preprocessor macros. To
> avoid production of duplicate symbols, do not build the stubs on xtensa.
> A future patch will remove the xtensa implementation of these stubs.
> 
> Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
> ---
> v4: New patch
> ---
>   kernel/kcsan/Makefile |  3 ++
>   kernel/kcsan/stubs.c  | 78 +++++++++++++++++++++++++++++++++++++++++++
>   2 files changed, 81 insertions(+)
>   create mode 100644 kernel/kcsan/stubs.c

I think it would be better to merge patch 1 and patch 2; that way we keep
the history and can see that stubs.c mostly comes from xtensa.

The summary would then be:

  arch/xtensa/lib/Makefile                              |  1 -
  kernel/kcsan/Makefile                                 |  2 +-
  arch/xtensa/lib/kcsan-stubs.c => kernel/kcsan/stubs.c | 26 +++++++++++++++++++++++++-
  3 files changed, 26 insertions(+), 3 deletions(-)


> 
> diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
> index 8cf70f068d92..5dfc5825aae9 100644
> --- a/kernel/kcsan/Makefile
> +++ b/kernel/kcsan/Makefile
> @@ -12,6 +12,9 @@ CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
>   	-fno-stack-protector -DDISABLE_BRANCH_PROFILING
>   
>   obj-y := core.o debugfs.o report.o
> +ifndef XTENSA
> +	obj-y += stubs.o

obj-$(CONFIG_32BIT) += stubs.o

That would avoid the #ifdef CONFIG_32BIT in stubs.c.
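
For illustration, the Makefile lines would then read roughly as follows (a
sketch, assuming CONFIG_32BIT is visible to kbuild here):

obj-y := core.o debugfs.o report.o
obj-$(CONFIG_32BIT) += stubs.o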

> +endif
>   
>   KCSAN_INSTRUMENT_BARRIERS_selftest.o := y
>   obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
> diff --git a/kernel/kcsan/stubs.c b/kernel/kcsan/stubs.c
> new file mode 100644
> index 000000000000..ec5cd99be422
> --- /dev/null
> +++ b/kernel/kcsan/stubs.c
> @@ -0,0 +1,78 @@
> +// SPDX-License Identifier: GPL-2.0

Missing - between License and Identifier ?

> +
> +#include <linux/bug.h>
> +#include <linux/types.h>
> +
> +#ifdef CONFIG_32BIT

Should be handled in Makefile

> +
> +#if !__has_builtin(__atomic_store_8)

Does any 32-bit arch support that? Is that #if required?

If yes, do we really need the #if for each and every function? Can't we
just check for one and assume that if we don't have __atomic_store_8 we
don't have any of the functions?
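
Something like this single-check variant, say (a sketch, assuming the
8-byte builtins come as a set):

#if !__has_builtin(__atomic_store_8)
/* If one 8-byte builtin is missing, assume they all are. */
void __atomic_store_8(volatile void *p, u64 v, int i) { BUG(); }
u64 __atomic_load_8(const volatile void *p, int i) { BUG(); }
/* ... the remaining 8-byte stubs, unconditionally ... */
#endif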


> +void __atomic_store_8(volatile void *p, u64 v, int i)
> +{
> +	BUG();
> +}
> +#endif
> +
> +#if !__has_builtin(__atomic_load_8)
> +u64 __atomic_load_8(const volatile void *p, int i)
> +{
> +	BUG();
> +}
> +#endif
> +
> +#if !__has_builtin(__atomic_exchange_8)
> +u64 __atomic_exchange_8(volatile void *p, u64 v, int i)
> +{
> +	BUG();
> +}
> +#endif
> +
> +#if !__has_builtin(__atomic_compare_exchange_8)
> +bool __atomic_compare_exchange_8(volatile void *p1, void *p2, u64 v, bool b, int i1, int i2)
> +{
> +	BUG();
> +}
> +#endif
> +
> +#if !__has_builtin(__atomic_fetch_add_8)
> +u64 __atomic_fetch_add_8(volatile void *p, u64 v, int i)
> +{
> +	BUG();
> +}
> +#endif
> +
> +#if !__has_builtin(__atomic_fetch_sub_8)
> +u64 __atomic_fetch_sub_8(volatile void *p, u64 v, int i)
> +{
> +	BUG();
> +}
> +#endif
> +
> +#if !__has_builtin(__atomic_fetch_and_8)
> +u64 __atomic_fetch_and_8(volatile void *p, u64 v, int i)
> +{
> +	BUG();
> +}
> +#endif
> +
> +#if !__has_builtin(__atomic_fetch_or_8)
> +u64 __atomic_fetch_or_8(volatile void *p, u64 v, int i)
> +{
> +	BUG();
> +}
> +#endif
> +
> +#if !__has_builtin(__atomic_fetch_xor_8)
> +u64 __atomic_fetch_xor_8(volatile void *p, u64 v, int i)
> +{
> +	BUG();
> +}
> +#endif
> +
> +#if !__has_builtin(__atomic_fetch_nand_8)
> +u64 __atomic_fetch_nand_8(volatile void *p, u64 v, int i)
> +{
> +	BUG();
> +}
> +#endif
> +
> +#endif /* CONFIG_32BIT */
Rohan McLure Feb. 8, 2023, 11:14 p.m. UTC | #3
> On 8 Feb 2023, at 11:23 pm, Christophe Leroy <christophe.leroy@csgroup.eu> wrote:
> 
> 
> 
> On 08/02/2023 at 04:21, Rohan McLure wrote:
>> KCSAN instruments calls to atomic builtins, and will in turn call these
>> builtins itself. As such, architectures supporting KCSAN must have
>> compiler support for these atomic primitives.
>> 
>> Since 32-bit systems are unlikely to have 64-bit compiler builtins,
>> provide a stub for each missing builtin, and use BUG() to assert
>> unreachability.
>> 
>> In commit 725aea873261 ("xtensa: enable KCSAN"), xtensa implements these
>> locally, but does not advertise the fact with preprocessor macros. To
>> avoid production of duplicate symbols, do not build the stubs on xtensa.
>> A future patch will remove the xtensa implementation of these stubs.
>> 
>> Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
>> ---
>> v4: New patch
>> ---
>>  kernel/kcsan/Makefile |  3 ++
>>  kernel/kcsan/stubs.c  | 78 +++++++++++++++++++++++++++++++++++++++++++
>>  2 files changed, 81 insertions(+)
>>  create mode 100644 kernel/kcsan/stubs.c
> 
> I think it would be better to merge patch 1 and patch 2; that way we keep
> the history and can see that stubs.c mostly comes from xtensa.
> 
> The summary would then be:
> 
>  arch/xtensa/lib/Makefile                              |  1 -
>  kernel/kcsan/Makefile                                 |  2 +-
>  arch/xtensa/lib/kcsan-stubs.c => kernel/kcsan/stubs.c | 26 +++++++++++++++++++++++++-
>  3 files changed, 26 insertions(+), 3 deletions(-)
> 
> 
>> 
>> diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
>> index 8cf70f068d92..5dfc5825aae9 100644
>> --- a/kernel/kcsan/Makefile
>> +++ b/kernel/kcsan/Makefile
>> @@ -12,6 +12,9 @@ CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
>>   -fno-stack-protector -DDISABLE_BRANCH_PROFILING
>> 
>>  obj-y := core.o debugfs.o report.o
>> +ifndef XTENSA
>> + obj-y += stubs.o
> 
> obj-$(CONFIG_32BIT) += stubs.o
>
> That would avoid the #ifdef CONFIG_32BIT in stubs.c.

Thanks. Yes, happy to do this.

> 
>> +endif
>> 
>>  KCSAN_INSTRUMENT_BARRIERS_selftest.o := y
>>  obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
>> diff --git a/kernel/kcsan/stubs.c b/kernel/kcsan/stubs.c
>> new file mode 100644
>> index 000000000000..ec5cd99be422
>> --- /dev/null
>> +++ b/kernel/kcsan/stubs.c
>> @@ -0,0 +1,78 @@
>> +// SPDX-License Identifier: GPL-2.0
> 
> Missing - between License and Identifier ?
> 
>> +
>> +#include <linux/bug.h>
>> +#include <linux/types.h>
>> +
>> +#ifdef CONFIG_32BIT
> 
> Should be handled in Makefile
> 
>> +
>> +#if !__has_builtin(__atomic_store_8)
> 
> Does any 32-bit arch support that? Is that #if required?
>
> If yes, do we really need the #if for each and every function? Can't we
> just check for one and assume that if we don't have __atomic_store_8 we
> don't have any of the functions?

Testing shows that gcc provides 8-byte atomic builtins on 32-bit x86 and
arm. However, I believe it should suffice to check for __atomic_store_8 or
any other such builtin: from what I've seen, if an arch implements one, it
likely implements them all.
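
A quick way to check what a given 32-bit toolchain does is to compile a
trivial user of the builtin and look for the out-of-line call (illustrative
test, not part of the patch; the result varies with -march):

/* test.c: with native 8-byte atomics this compiles to inline code;
 * otherwise gcc emits a call to __atomic_load_8 from libatomic. */
unsigned long long load64(const volatile unsigned long long *p)
{
	return __atomic_load_n(p, __ATOMIC_RELAXED);
}

Building with "gcc -m32 -O2 -S test.c" and searching the assembly for
__atomic_load_8 then shows whether the library call is emitted.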

> 
> 
>> +void __atomic_store_8(volatile void *p, u64 v, int i)
>> +{
>> + BUG();
>> +}
>> +#endif
>> +
>> +#if !__has_builtin(__atomic_load_8)
>> +u64 __atomic_load_8(const volatile void *p, int i)
>> +{
>> + BUG();
>> +}
>> +#endif
>> +
>> +#if !__has_builtin(__atomic_exchange_8)
>> +u64 __atomic_exchange_8(volatile void *p, u64 v, int i)
>> +{
>> + BUG();
>> +}
>> +#endif
>> +
>> +#if !__has_builtin(__atomic_compare_exchange_8)
>> +bool __atomic_compare_exchange_8(volatile void *p1, void *p2, u64 v, bool b, int i1, int i2)
>> +{
>> + BUG();
>> +}
>> +#endif
>> +
>> +#if !__has_builtin(__atomic_fetch_add_8)
>> +u64 __atomic_fetch_add_8(volatile void *p, u64 v, int i)
>> +{
>> + BUG();
>> +}
>> +#endif
>> +
>> +#if !__has_builtin(__atomic_fetch_sub_8)
>> +u64 __atomic_fetch_sub_8(volatile void *p, u64 v, int i)
>> +{
>> + BUG();
>> +}
>> +#endif
>> +
>> +#if !__has_builtin(__atomic_fetch_and_8)
>> +u64 __atomic_fetch_and_8(volatile void *p, u64 v, int i)
>> +{
>> + BUG();
>> +}
>> +#endif
>> +
>> +#if !__has_builtin(__atomic_fetch_or_8)
>> +u64 __atomic_fetch_or_8(volatile void *p, u64 v, int i)
>> +{
>> + BUG();
>> +}
>> +#endif
>> +
>> +#if !__has_builtin(__atomic_fetch_xor_8)
>> +u64 __atomic_fetch_xor_8(volatile void *p, u64 v, int i)
>> +{
>> + BUG();
>> +}
>> +#endif
>> +
>> +#if !__has_builtin(__atomic_fetch_nand_8)
>> +u64 __atomic_fetch_nand_8(volatile void *p, u64 v, int i)
>> +{
>> + BUG();
>> +}
>> +#endif
>> +
>> +#endif /* CONFIG_32BIT */
Rohan McLure Feb. 9, 2023, 11:36 p.m. UTC | #4
> On 9 Feb 2023, at 10:14 am, Rohan McLure <rmclure@linux.ibm.com> wrote:
> 
> 
> 
>> On 8 Feb 2023, at 11:23 pm, Christophe Leroy <christophe.leroy@csgroup.eu> wrote:
>> 
>> 
>> 
>> On 08/02/2023 at 04:21, Rohan McLure wrote:
>>> KCSAN instruments calls to atomic builtins, and will in turn call these
>>> builtins itself. As such, architectures supporting KCSAN must have
>>> compiler support for these atomic primitives.
>>> 
>>> Since 32-bit systems are unlikely to have 64-bit compiler builtins,
>>> provide a stub for each missing builtin, and use BUG() to assert
>>> unreachability.
>>> 
>>> In commit 725aea873261 ("xtensa: enable KCSAN"), xtensa implements these
>>> locally, but does not advertise the fact with preprocessor macros. To
>>> avoid production of duplicate symbols, do not build the stubs on xtensa.
>>> A future patch will remove the xtensa implementation of these stubs.
>>> 
>>> Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
>>> ---
>>> v4: New patch
>>> ---
>>> kernel/kcsan/Makefile |  3 ++
>>> kernel/kcsan/stubs.c  | 78 +++++++++++++++++++++++++++++++++++++++++++
>>> 2 files changed, 81 insertions(+)
>>> create mode 100644 kernel/kcsan/stubs.c
>> 
>> I think it would be better to merge patch 1 and patch 2; that way we keep
>> the history and can see that stubs.c mostly comes from xtensa.
>> 
>> The summary would then be:
>> 
>> arch/xtensa/lib/Makefile                              |  1 -
>> kernel/kcsan/Makefile                                 |  2 +-
>> arch/xtensa/lib/kcsan-stubs.c => kernel/kcsan/stubs.c | 26 +++++++++++++++++++++++++-
>> 3 files changed, 26 insertions(+), 3 deletions(-)
>> 
>> 
>>> 
>>> diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
>>> index 8cf70f068d92..5dfc5825aae9 100644
>>> --- a/kernel/kcsan/Makefile
>>> +++ b/kernel/kcsan/Makefile
>>> @@ -12,6 +12,9 @@ CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
>>>  -fno-stack-protector -DDISABLE_BRANCH_PROFILING
>>> 
>>> obj-y := core.o debugfs.o report.o
>>> +ifndef XTENSA
>>> + obj-y += stubs.o
>> 
>> obj-$(CONFIG_32BIT) += stubs.o
>>
>> That would avoid the #ifdef CONFIG_32BIT in stubs.c.
> 
> Thanks. Yes, happy to do this.
> 
>> 
>>> +endif
>>> 
>>> KCSAN_INSTRUMENT_BARRIERS_selftest.o := y
>>> obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
>>> diff --git a/kernel/kcsan/stubs.c b/kernel/kcsan/stubs.c
>>> new file mode 100644
>>> index 000000000000..ec5cd99be422
>>> --- /dev/null
>>> +++ b/kernel/kcsan/stubs.c
>>> @@ -0,0 +1,78 @@
>>> +// SPDX-License Identifier: GPL-2.0
>> 
>> Missing - between License and Identifier ?
>> 
>>> +
>>> +#include <linux/bug.h>
>>> +#include <linux/types.h>
>>> +
>>> +#ifdef CONFIG_32BIT
>> 
>> Should be handled in Makefile
>> 
>>> +
>>> +#if !__has_builtin(__atomic_store_8)
>> 
>> Does any 32-bit arch support that? Is that #if required?
>>
>> If yes, do we really need the #if for each and every function? Can't we
>> just check for one and assume that if we don't have __atomic_store_8 we
>> don't have any of the functions?
> 
> Testing shows that gcc provides 8-byte atomic builtins on 32-bit x86 and
> arm. However, I believe it should suffice to check for __atomic_store_8 or
> any other such builtin: from what I've seen, if an arch implements one, it
> likely implements them all.

In reality, __has_builtin only indicates that GCC knows of the builtin's
existence; linking against libatomic may still be required. Let's remove
this check and have ppc32 and xtensa opt into compiling the stubs file.

Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108734
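
That opt-in could look roughly like this (a sketch;
CONFIG_ARCH_WANTS_KCSAN_STUBS is a hypothetical symbol name, not from this
series):

# kernel/kcsan/Makefile
obj-$(CONFIG_ARCH_WANTS_KCSAN_STUBS) += stubs.o

with ppc32 and xtensa selecting that symbol from their Kconfig.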

> 
>> 
>> 
>>> +void __atomic_store_8(volatile void *p, u64 v, int i)
>>> +{
>>> + BUG();
>>> +}
>>> +#endif
>>> +
>>> +#if !__has_builtin(__atomic_load_8)
>>> +u64 __atomic_load_8(const volatile void *p, int i)
>>> +{
>>> + BUG();
>>> +}
>>> +#endif
>>> +
>>> +#if !__has_builtin(__atomic_exchange_8)
>>> +u64 __atomic_exchange_8(volatile void *p, u64 v, int i)
>>> +{
>>> + BUG();
>>> +}
>>> +#endif
>>> +
>>> +#if !__has_builtin(__atomic_compare_exchange_8)
>>> +bool __atomic_compare_exchange_8(volatile void *p1, void *p2, u64 v, bool b, int i1, int i2)
>>> +{
>>> + BUG();
>>> +}
>>> +#endif
>>> +
>>> +#if !__has_builtin(__atomic_fetch_add_8)
>>> +u64 __atomic_fetch_add_8(volatile void *p, u64 v, int i)
>>> +{
>>> + BUG();
>>> +}
>>> +#endif
>>> +
>>> +#if !__has_builtin(__atomic_fetch_sub_8)
>>> +u64 __atomic_fetch_sub_8(volatile void *p, u64 v, int i)
>>> +{
>>> + BUG();
>>> +}
>>> +#endif
>>> +
>>> +#if !__has_builtin(__atomic_fetch_and_8)
>>> +u64 __atomic_fetch_and_8(volatile void *p, u64 v, int i)
>>> +{
>>> + BUG();
>>> +}
>>> +#endif
>>> +
>>> +#if !__has_builtin(__atomic_fetch_or_8)
>>> +u64 __atomic_fetch_or_8(volatile void *p, u64 v, int i)
>>> +{
>>> + BUG();
>>> +}
>>> +#endif
>>> +
>>> +#if !__has_builtin(__atomic_fetch_xor_8)
>>> +u64 __atomic_fetch_xor_8(volatile void *p, u64 v, int i)
>>> +{
>>> + BUG();
>>> +}
>>> +#endif
>>> +
>>> +#if !__has_builtin(__atomic_fetch_nand_8)
>>> +u64 __atomic_fetch_nand_8(volatile void *p, u64 v, int i)
>>> +{
>>> + BUG();
>>> +}
>>> +#endif
>>> +
>>> +#endif /* CONFIG_32BIT */

Patch

diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
index 8cf70f068d92..5dfc5825aae9 100644
--- a/kernel/kcsan/Makefile
+++ b/kernel/kcsan/Makefile
@@ -12,6 +12,9 @@  CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
 	-fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 obj-y := core.o debugfs.o report.o
+ifndef XTENSA
+	obj-y += stubs.o
+endif
 
 KCSAN_INSTRUMENT_BARRIERS_selftest.o := y
 obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
diff --git a/kernel/kcsan/stubs.c b/kernel/kcsan/stubs.c
new file mode 100644
index 000000000000..ec5cd99be422
--- /dev/null
+++ b/kernel/kcsan/stubs.c
@@ -0,0 +1,78 @@ 
+// SPDX-License Identifier: GPL-2.0
+
+#include <linux/bug.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_32BIT
+
+#if !__has_builtin(__atomic_store_8)
+void __atomic_store_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_load_8)
+u64 __atomic_load_8(const volatile void *p, int i)
+{
+	BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_exchange_8)
+u64 __atomic_exchange_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_compare_exchange_8)
+bool __atomic_compare_exchange_8(volatile void *p1, void *p2, u64 v, bool b, int i1, int i2)
+{
+	BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_fetch_add_8)
+u64 __atomic_fetch_add_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_fetch_sub_8)
+u64 __atomic_fetch_sub_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_fetch_and_8)
+u64 __atomic_fetch_and_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_fetch_or_8)
+u64 __atomic_fetch_or_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_fetch_xor_8)
+u64 __atomic_fetch_xor_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_fetch_nand_8)
+u64 __atomic_fetch_nand_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+#endif
+
+#endif /* CONFIG_32BIT */