
[v9,2/6] kasan: allow architectures to provide an outline readiness check

Message ID 20201201161632.1234753-3-dja@axtens.net (mailing list archive)
State Superseded
Series KASAN for powerpc64 radix

Checks

Context Check Description
snowpatch_ozlabs/apply_patch success Successfully applied on branch powerpc/merge (78c312324391ee996944e1196123b0060888e189)
snowpatch_ozlabs/checkpatch warning total: 1 errors, 0 warnings, 0 checks, 47 lines checked
snowpatch_ozlabs/needsstable success Patch has no Fixes tags

Commit Message

Daniel Axtens Dec. 1, 2020, 4:16 p.m. UTC
Allow architectures to define a kasan_arch_is_ready() hook that bails
out of any function that's about to touch the shadow unless the arch
says that the shadow memory is ready to be accessed. This is fairly
unintrusive and should have a negligible performance penalty.

This will only work in outline mode, so an arch must specify
HAVE_ARCH_NO_KASAN_INLINE if it requires this.

Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Daniel Axtens <dja@axtens.net>

--

I discuss the justification for this later in the series. Also,
both previous RFCs for ppc64 - by 2 different people - have
needed this trick! See:
 - https://lore.kernel.org/patchwork/patch/592820/ # ppc64 hash series
 - https://patchwork.ozlabs.org/patch/795211/      # ppc radix series
---
 include/linux/kasan.h |  4 ++++
 mm/kasan/common.c     | 10 ++++++++++
 mm/kasan/generic.c    |  3 +++
 3 files changed, 17 insertions(+)
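
For context, a minimal sketch of how an architecture could hook into this. The flag name and header placement below are hypothetical placeholders rather than anything from this series; the only real requirement, given the #ifndef fallback added to include/linux/kasan.h, is that the arch's definition is visible before that fallback:

/* Hypothetical excerpt from an arch's <asm/kasan.h>. */
extern bool arch_kasan_shadow_ready;	/* set once the real shadow is mapped */

static inline bool kasan_arch_is_ready(void)
{
	return arch_kasan_shadow_ready;
}
#define kasan_arch_is_ready kasan_arch_is_ready	/* suppress the generic fallback */

The arch would additionally need HAVE_ARCH_NO_KASAN_INLINE, as the commit message notes, because inline instrumentation emits shadow accesses directly into the generated code and never reaches these C hooks.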

Comments

Christophe Leroy Dec. 1, 2020, 4:53 p.m. UTC | #1
On 01/12/2020 at 17:16, Daniel Axtens wrote:
> Allow architectures to define a kasan_arch_is_ready() hook that bails
> out of any function that's about to touch the shadow unless the arch
> says that the shadow memory is ready to be accessed. This is fairly
> unintrusive and should have a negligible performance penalty.
> 
> This will only work in outline mode, so an arch must specify
> HAVE_ARCH_NO_KASAN_INLINE if it requires this.
> 
> Cc: Balbir Singh <bsingharora@gmail.com>
> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>

Did I sign that off at some point? I can't remember.

Please update my email address, and maybe change it to a Suggested-by:? I think the first
Signed-off-by: has to be the author of the patch.

> Signed-off-by: Daniel Axtens <dja@axtens.net>
> 
> --
> 
> I discuss the justification for this later in the series. Also,
> both previous RFCs for ppc64 - by 2 different people - have
> needed this trick! See:
>   - https://lore.kernel.org/patchwork/patch/592820/ # ppc64 hash series
>   - https://patchwork.ozlabs.org/patch/795211/      # ppc radix series
> ---
>   include/linux/kasan.h |  4 ++++
>   mm/kasan/common.c     | 10 ++++++++++
>   mm/kasan/generic.c    |  3 +++
>   3 files changed, 17 insertions(+)
> 
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 30d343b4a40a..3df66fdf6662 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -20,6 +20,10 @@ struct kunit_kasan_expectation {
>   	bool report_found;
>   };
>   
> +#ifndef kasan_arch_is_ready
> +static inline bool kasan_arch_is_ready(void)	{ return true; }
> +#endif
> +
>   extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
>   extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
>   extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 950fd372a07e..ba7744d3e319 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -117,6 +117,9 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
>   {
>   	void *shadow_start, *shadow_end;
>   
> +	if (!kasan_arch_is_ready())
> +		return;
> +
>   	/*
>   	 * Perform shadow offset calculation based on untagged address, as
>   	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
> @@ -134,6 +137,9 @@ void kasan_unpoison_shadow(const void *address, size_t size)
>   {
>   	u8 tag = get_tag(address);
>   
> +	if (!kasan_arch_is_ready())
> +		return;
> +
>   	/*
>   	 * Perform shadow offset calculation based on untagged address, as
>   	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
> @@ -406,6 +412,10 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
>   	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
>   		return false;
>   
> +	/* We can't read the shadow byte if the arch isn't ready */
> +	if (!kasan_arch_is_ready())
> +		return false;
> +
>   	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
>   	if (shadow_invalid(tag, shadow_byte)) {
>   		kasan_report_invalid_free(tagged_object, ip);
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index 248264b9cb76..e87404026b2b 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -169,6 +169,9 @@ static __always_inline bool check_memory_region_inline(unsigned long addr,
>   						size_t size, bool write,
>   						unsigned long ret_ip)
>   {
> +	if (!kasan_arch_is_ready())
> +		return true;
> +
>   	if (unlikely(size == 0))
>   		return true;
>   
>

Patch

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 30d343b4a40a..3df66fdf6662 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -20,6 +20,10 @@  struct kunit_kasan_expectation {
 	bool report_found;
 };
 
+#ifndef kasan_arch_is_ready
+static inline bool kasan_arch_is_ready(void)	{ return true; }
+#endif
+
 extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
 extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
 extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 950fd372a07e..ba7744d3e319 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -117,6 +117,9 @@  void kasan_poison_shadow(const void *address, size_t size, u8 value)
 {
 	void *shadow_start, *shadow_end;
 
+	if (!kasan_arch_is_ready())
+		return;
+
 	/*
 	 * Perform shadow offset calculation based on untagged address, as
 	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
@@ -134,6 +137,9 @@  void kasan_unpoison_shadow(const void *address, size_t size)
 {
 	u8 tag = get_tag(address);
 
+	if (!kasan_arch_is_ready())
+		return;
+
 	/*
 	 * Perform shadow offset calculation based on untagged address, as
 	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
@@ -406,6 +412,10 @@  static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
 		return false;
 
+	/* We can't read the shadow byte if the arch isn't ready */
+	if (!kasan_arch_is_ready())
+		return false;
+
 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
 	if (shadow_invalid(tag, shadow_byte)) {
 		kasan_report_invalid_free(tagged_object, ip);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 248264b9cb76..e87404026b2b 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -169,6 +169,9 @@  static __always_inline bool check_memory_region_inline(unsigned long addr,
 						size_t size, bool write,
 						unsigned long ret_ip)
 {
+	if (!kasan_arch_is_ready())
+		return true;
+
 	if (unlikely(size == 0))
 		return true;
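
For completeness, a sketch of the arch-side initialisation that would eventually flip the hook to true; the function and flag names are placeholders, not taken from this series:

#include <linux/init.h>
#include <linux/types.h>

/* Same placeholder flag as in the <asm/kasan.h> sketch above. */
bool arch_kasan_shadow_ready;

void __init arch_kasan_init(void)
{
	/* ... map and zero a real shadow region covering kernel memory ... */

	/*
	 * From here on kasan_arch_is_ready() returns true, so the early
	 * returns added above stop firing: kasan_poison_shadow(),
	 * kasan_unpoison_shadow(), __kasan_slab_free() and
	 * check_memory_region_inline() touch the shadow as usual.
	 */
	arch_kasan_shadow_ready = true;
}

Until that point, check_memory_region_inline() reports every access as valid and the poison/unpoison helpers are no-ops, which is exactly what the hunks above arrange.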