@@ -27,12 +27,10 @@
#ifdef CONFIG_KASAN
void kasan_early_init(void);
-void kasan_mmu_init(void);
void kasan_init(void);
void kasan_late_init(void);
#else
static inline void kasan_init(void) { }
-static inline void kasan_mmu_init(void) { }
static inline void kasan_late_init(void) { }
#endif
@@ -170,8 +170,6 @@ void __init MMU_init(void)
btext_unmap();
#endif
- kasan_mmu_init();
-
setup_kup();
/* Shortly after that, the entire linear mapping will be available */
@@ -131,7 +131,7 @@ static void __init kasan_unmap_early_shadow_vmalloc(void)
flush_tlb_kernel_range(k_start, k_end);
}
-void __init kasan_mmu_init(void)
+static void __init kasan_mmu_init(void)
{
int ret;
struct memblock_region *reg;
@@ -159,6 +159,8 @@ void __init kasan_mmu_init(void)
void __init kasan_init(void)
{
+ kasan_mmu_init();
+
kasan_remap_early_shadow_ro();
clear_page(kasan_early_shadow_page);
Doing kasan page allocation in MMU_init() is too early: the kernel doesn't
yet have access to the entire memory space, and memblock_alloc() fails when
the kernel is a bit big.

Do it from kasan_init() instead.

Fixes: 2edb16efc899 ("powerpc/32: Add KASAN support")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/powerpc/include/asm/kasan.h      | 2 --
 arch/powerpc/mm/init_32.c             | 2 --
 arch/powerpc/mm/kasan/kasan_init_32.c | 4 +++-
 3 files changed, 3 insertions(+), 5 deletions(-)
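
For reference, a minimal sketch of the resulting flow in
arch/powerpc/mm/kasan/kasan_init_32.c after this change, reconstructed from
the hunks above (function bodies and the remaining steps are elided; this is
not the actual file contents):

/* kasan_mmu_init() becomes static: it is now called only from kasan_init()
 * below, no longer from MMU_init() in arch/powerpc/mm/init_32.c. */
static void __init kasan_mmu_init(void)
{
	/* The memblock_alloc() of the shadow memory happens here; it is safe
	 * at this point because the entire linear mapping is available. */
}

void __init kasan_init(void)
{
	kasan_mmu_init();	/* moved here from MMU_init() */

	kasan_remap_early_shadow_ro();
	clear_page(kasan_early_shadow_page);
	/* ... */
}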