@@ -176,6 +176,7 @@ config PPC
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if PPC32
+ select HAVE_ARCH_KASAN if PPC_BOOK3E_64 && !SPARSEMEM_VMEMMAP
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@@ -428,6 +428,7 @@ endif
endif
ifdef CONFIG_KASAN
+ifdef CONFIG_PPC32
ifndef CONFIG_PPC_BOOK3S_32
prepare: kasan_prepare
@@ -435,6 +436,7 @@ kasan_prepare: prepare0
$(eval KASAN_SHADOW_OFFSET = $(shell awk '{if ($$2 == "KASAN_SHADOW_OFFSET") print $$3;}' include/generated/asm-offsets.h))
endif
endif
+endif
# Check toolchain versions:
# - gcc-4.6 is the minimum kernel-wide version so nothing required.
@@ -15,6 +15,7 @@
#ifndef __ASSEMBLY__
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/pgtable-types.h>
#include <linux/jump_label.h>
@@ -25,6 +26,7 @@
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+#ifdef CONFIG_PPC32
#include <asm/fixmap.h>
#define KASAN_SHADOW_START (ALIGN_DOWN(FIXADDR_START - KASAN_SHADOW_SIZE, \
@@ -33,13 +35,15 @@
#define KASAN_SHADOW_SIZE ((~0UL - PAGE_OFFSET + 1) >> KASAN_SHADOW_SCALE_SHIFT)
void kasan_early_init(void);
+#endif /* CONFIG_PPC32 */
+
void kasan_init(void);
extern struct static_key_false powerpc_kasan_enabled_key;
static inline bool kasan_arch_is_ready(void)
{
- if (!IS_ENABLED(CONFIG_BOOK3S_32))
+ if (IS_ENABLED(CONFIG_PPC32) && !IS_ENABLED(CONFIG_PPC_BOOK3S_32))
return true;
if (static_branch_likely(&powerpc_kasan_enabled_key))
return true;
@@ -47,5 +51,55 @@ static inline bool kasan_arch_is_ready(void)
}
#define kasan_arch_is_ready kasan_arch_is_ready
+#ifdef CONFIG_PPC_BOOK3E_64
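+/*
+ * The shadow occupies the window otherwise used for vmemmap, which is why
+ * HAVE_ARCH_KASAN for Book3E 64-bit is only selected when SPARSEMEM_VMEMMAP
+ * is disabled.
+ */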
+#define KASAN_SHADOW_START VMEMMAP_BASE
+#define KASAN_SHADOW_SIZE (KERN_VIRT_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+
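+/*
+ * Only the linear mapping gets a real shadow.  Accesses in the kernel
+ * virtual region starting at KERN_VIRT_START are redirected to a single
+ * shared early shadow page, which kasan_init() maps read-only.
+ */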
+static inline void *kasan_mem_to_shadow_book3e(const void *addr)
+{
+ if ((unsigned long)addr >= KERN_VIRT_START &&
+ (unsigned long)addr < (KERN_VIRT_START + KERN_VIRT_SIZE)) {
+ return (void *)kasan_early_shadow_page;
+ }
+
+ return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ + KASAN_SHADOW_OFFSET;
+}
+#define kasan_mem_to_shadow kasan_mem_to_shadow_book3e
+
+static inline void *kasan_shadow_to_mem_book3e(const void *shadow_addr)
+{
+ /*
+ * We map the entire non-linear virtual mapping onto the zero page so if
+ * we are asked to map the zero page back just pick the beginning of that
+ * area.
+ */
+ if (shadow_addr >= (void *)kasan_early_shadow_page &&
+ shadow_addr < (void *)(kasan_early_shadow_page + PAGE_SIZE)) {
+ return (void *)KERN_VIRT_START;
+ }
+
+ return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
+ << KASAN_SHADOW_SCALE_SHIFT);
+}
+#define kasan_shadow_to_mem kasan_shadow_to_mem_book3e
+
+static inline bool kasan_addr_has_shadow_book3e(const void *addr)
+{
+ /*
+ * We want to specifically assert that the addresses in the 0x8000...
+ * region have a shadow, otherwise they are considered by the kasan
+ * core to be wild pointers
+ */
+ if ((unsigned long)addr >= KERN_VIRT_START &&
+ (unsigned long)addr < (KERN_VIRT_START + KERN_VIRT_SIZE)) {
+ return true;
+ }
+ return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
+}
+#define kasan_addr_has_shadow kasan_addr_has_shadow_book3e
+
+#endif /* CONFIG_PPC_BOOK3E_64 */
+
#endif /* __ASSEMBLY */
#endif
@@ -7,6 +7,8 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
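+# fsl_booke_mmu.o runs during early boot, before the KASAN shadow is mapped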
+KASAN_SANITIZE_fsl_booke_mmu.o := n
+
obj-y := fault.o mem.o pgtable.o mmap.o \
init_$(BITS).o pgtable_$(BITS).o \
init-common.o mmu_context.o drmem.o
@@ -3,3 +3,4 @@
KASAN_SANITIZE := n
obj-$(CONFIG_PPC32) += kasan_init_32.o
+obj-$(CONFIG_PPC_BOOK3E_64) += kasan_init_book3e_64.o
new file mode 100644
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/printk.h>
+#include <linux/memblock.h>
+#include <linux/sched/task.h>
+#include <asm/pgalloc.h>
+
+DEFINE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key);
+EXPORT_SYMBOL(powerpc_kasan_enabled_key);
+
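+/*
+ * Allocate backing pages from memblock and map them at the shadow addresses
+ * covering one region of physical memory.
+ */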
+static void __init kasan_init_region(struct memblock_region *reg)
+{
+ void *start = __va(reg->base);
+ void *end = __va(reg->base + reg->size);
+ unsigned long k_start, k_end, k_cur;
+
+ if (start >= end)
+ return;
+
+ k_start = (unsigned long)kasan_mem_to_shadow(start);
+ k_end = (unsigned long)kasan_mem_to_shadow(end);
+
+ for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) {
+ void *va = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+
+ map_kernel_page(k_cur, __pa(va), PAGE_KERNEL);
+ }
+ flush_tlb_kernel_range(k_start, k_end);
+}
+
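+/*
+ * Populate a real shadow for every memblock region, make the shared early
+ * shadow page read-only, then enable checking and reporting.
+ */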
+void __init kasan_init(void)
+{
+ struct memblock_region *reg;
+
+ for_each_memblock(memory, reg)
+ kasan_init_region(reg);
+
+ /* map the shared early shadow page RO */
+ map_kernel_page((unsigned long)kasan_early_shadow_page,
+ __pa(kasan_early_shadow_page), PAGE_KERNEL_RO);
+
+ /* Turn on checking */
+ static_branch_inc(&powerpc_kasan_enabled_key);
+
+ /* Enable error messages */
+ init_task.kasan_depth = 0;
+ pr_info("KASAN init done (64-bit Book3E)\n");
+}