
powerpc/ftrace: Ignore ftrace locations in exit text sections

Message ID 20240209075931.666935-1-naveen@kernel.org (mailing list archive)
State Superseded
Series powerpc/ftrace: Ignore ftrace locations in exit text sections

Checks

Context Check Description
snowpatch_ozlabs/github-powerpc_ppctests success Successfully ran 8 jobs.
snowpatch_ozlabs/github-powerpc_selftests success Successfully ran 8 jobs.
snowpatch_ozlabs/github-powerpc_sparse success Successfully ran 4 jobs.
snowpatch_ozlabs/github-powerpc_clang fail 2 of 6 jobs failed.
snowpatch_ozlabs/github-powerpc_kernel_qemu success Successfully ran 23 jobs.

Commit Message

Naveen N Rao Feb. 9, 2024, 7:59 a.m. UTC
Michael reported that we are seeing an ftrace bug on bootup when KASAN is
enabled and we are using -fpatchable-function-entry:

    ftrace: allocating 47780 entries in 18 pages
    ftrace-powerpc: 0xc0000000020b3d5c: No module provided for non-kernel address
    ------------[ ftrace bug ]------------
    ftrace faulted on modifying
    [<c0000000020b3d5c>] 0xc0000000020b3d5c
    Initializing ftrace call sites
    ftrace record flags: 0
     (0)
     expected tramp: c00000000008cef4
    ------------[ cut here ]------------
    WARNING: CPU: 0 PID: 0 at kernel/trace/ftrace.c:2180 ftrace_bug+0x3c0/0x424
    Modules linked in:
    CPU: 0 PID: 0 Comm: swapper Not tainted 6.5.0-rc3-00120-g0f71dcfb4aef #860
    Hardware name: IBM pSeries (emulated by qemu) POWER9 (raw) 0x4e1202 0xf000005 of:SLOF,HEAD hv:linux,kvm pSeries
    NIP:  c0000000003aa81c LR: c0000000003aa818 CTR: 0000000000000000
    REGS: c0000000033cfab0 TRAP: 0700   Not tainted  (6.5.0-rc3-00120-g0f71dcfb4aef)
    MSR:  8000000002021033 <SF,VEC,ME,IR,DR,RI,LE>  CR: 28028240  XER: 00000000
    CFAR: c0000000002781a8 IRQMASK: 3
    ...
    NIP [c0000000003aa81c] ftrace_bug+0x3c0/0x424
    LR [c0000000003aa818] ftrace_bug+0x3bc/0x424
    Call Trace:
     ftrace_bug+0x3bc/0x424 (unreliable)
     ftrace_process_locs+0x5f4/0x8a0
     ftrace_init+0xc0/0x1d0
     start_kernel+0x1d8/0x484

With CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y and
CONFIG_KASAN=y, the compiler emits nops in the functions it generates for
registering and unregistering global variables (unlike with -pg and
-mprofile-kernel, where calls to _mcount() are not generated in those
functions). Those functions then end up in INIT_TEXT and EXIT_TEXT
respectively. We don't expect to see any profiled functions in
EXIT_TEXT, so ftrace_init_nop() assumes that all addresses that aren't
in the core kernel text belong to a module. Since these functions do
not match that criterion, we see the above bug.

Address this by having ftrace ignore all locations in the text exit
sections of vmlinux.

Fixes: 0f71dcfb4aef ("powerpc/ftrace: Add support for -fpatchable-function-entry")
Cc: stable@vger.kernel.org
Reported-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Naveen N Rao <naveen@kernel.org>
---
 arch/powerpc/include/asm/ftrace.h   |  9 +--------
 arch/powerpc/include/asm/sections.h |  1 +
 arch/powerpc/kernel/trace/ftrace.c  | 12 ++++++++++++
 arch/powerpc/kernel/vmlinux.lds.S   |  2 ++
 4 files changed, 16 insertions(+), 8 deletions(-)


base-commit: 4ef8376c466ae8b03e632dd8eca1e44315f7dd61
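
For context on why this is sufficient: the generic ftrace code already skips NULL entries when it walks the recorded call-site table, so having the powerpc ftrace_call_adjust() return 0 for addresses in .exit.text makes ftrace ignore those locations entirely. A simplified sketch of the relevant loop (paraphrased from ftrace_process_locs() in kernel/trace/ftrace.c, not the exact upstream code):

    for (p = start; p < end; p++) {
            unsigned long addr = ftrace_call_adjust(*p);

            /*
             * NULL entries (normally linker padding between the
             * mcount_loc sections of different object files) are
             * skipped, which is what lets an architecture drop
             * unwanted locations by returning 0 from
             * ftrace_call_adjust().
             */
            if (!addr)
                    continue;

            /* ... otherwise the location is recorded for later patching ... */
    }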

Comments

Christophe Leroy Feb. 12, 2024, 7:31 p.m. UTC | #1
On 09/02/2024 at 08:59, Naveen N Rao wrote:
> Michael reported that we are seeing ftrace bug on bootup when KASAN is
> enabled, and if we are using -fpatchable-function-entry:
> 
>      ftrace: allocating 47780 entries in 18 pages
>      ftrace-powerpc: 0xc0000000020b3d5c: No module provided for non-kernel address
>      ------------[ ftrace bug ]------------
>      ftrace faulted on modifying
>      [<c0000000020b3d5c>] 0xc0000000020b3d5c
>      Initializing ftrace call sites
>      ftrace record flags: 0
>       (0)
>       expected tramp: c00000000008cef4
>      ------------[ cut here ]------------
>      WARNING: CPU: 0 PID: 0 at kernel/trace/ftrace.c:2180 ftrace_bug+0x3c0/0x424
>      Modules linked in:
>      CPU: 0 PID: 0 Comm: swapper Not tainted 6.5.0-rc3-00120-g0f71dcfb4aef #860
>      Hardware name: IBM pSeries (emulated by qemu) POWER9 (raw) 0x4e1202 0xf000005 of:SLOF,HEAD hv:linux,kvm pSeries
>      NIP:  c0000000003aa81c LR: c0000000003aa818 CTR: 0000000000000000
>      REGS: c0000000033cfab0 TRAP: 0700   Not tainted  (6.5.0-rc3-00120-g0f71dcfb4aef)
>      MSR:  8000000002021033 <SF,VEC,ME,IR,DR,RI,LE>  CR: 28028240  XER: 00000000
>      CFAR: c0000000002781a8 IRQMASK: 3
>      ...
>      NIP [c0000000003aa81c] ftrace_bug+0x3c0/0x424
>      LR [c0000000003aa818] ftrace_bug+0x3bc/0x424
>      Call Trace:
>       ftrace_bug+0x3bc/0x424 (unreliable)
>       ftrace_process_locs+0x5f4/0x8a0
>       ftrace_init+0xc0/0x1d0
>       start_kernel+0x1d8/0x484
> 
> With CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y and
> CONFIG_KASAN=y, compiler emits nops in functions that it generates for
> registering and unregistering global variables (unlike with -pg and
> -mprofile-kernel where calls to _mcount() are not generated in those
> functions). Those functions then end up in INIT_TEXT and EXIT_TEXT
> respectively. We don't expect to see any profiled functions in
> EXIT_TEXT, so ftrace_init_nop() assumes that all addresses that aren't
> in the core kernel text belongs to a module. Since these functions do
> not match that criteria, we see the above bug.
> 
> Address this by having ftrace ignore all locations in the text exit
> sections of vmlinux.
> 
> Fixes: 0f71dcfb4aef ("powerpc/ftrace: Add support for -fpatchable-function-entry")
> Cc: stable@vger.kernel.org
> Reported-by: Michael Ellerman <mpe@ellerman.id.au>
> Signed-off-by: Naveen N Rao <naveen@kernel.org>
> ---
>   arch/powerpc/include/asm/ftrace.h   |  9 +--------
>   arch/powerpc/include/asm/sections.h |  1 +
>   arch/powerpc/kernel/trace/ftrace.c  | 12 ++++++++++++
>   arch/powerpc/kernel/vmlinux.lds.S   |  2 ++
>   4 files changed, 16 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
> index 1ebd2ca97f12..d6babd083202 100644
> --- a/arch/powerpc/include/asm/ftrace.h
> +++ b/arch/powerpc/include/asm/ftrace.h
> @@ -20,14 +20,7 @@
>   #ifndef __ASSEMBLY__
>   extern void _mcount(void);
>   
> -static inline unsigned long ftrace_call_adjust(unsigned long addr)
> -{
> -	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
> -		addr += MCOUNT_INSN_SIZE;
> -
> -	return addr;
> -}
> -
> +unsigned long ftrace_call_adjust(unsigned long addr);
>   unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
>   				    unsigned long sp);
>   
> diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
> index ea26665f82cf..d389dcecdb0b 100644
> --- a/arch/powerpc/include/asm/sections.h
> +++ b/arch/powerpc/include/asm/sections.h
> @@ -14,6 +14,7 @@ typedef struct func_desc func_desc_t;
>   
>   extern char __head_end[];
>   extern char __srwx_boundary[];
> +extern char _sexittext[], _eexittext[];

Should we at least try to use the same symbols as other architectures, or, 
better, move this into include/asm-generic/sections.h, just like inittext?

$ git grep exittext
arch/arm64/include/asm/sections.h:extern char __exittext_begin[], __exittext_end[];
arch/arm64/kernel/patching.c:           addr >= (unsigned long)__exittext_begin &&
arch/arm64/kernel/patching.c:           addr < (unsigned long)__exittext_end;
arch/arm64/kernel/vmlinux.lds.S:        __exittext_begin = .;
arch/arm64/kernel/vmlinux.lds.S:        __exittext_end = .;
arch/riscv/include/asm/sections.h:extern char __exittext_begin[], __exittext_end[];
arch/riscv/kernel/patch.c:static inline bool is_kernel_exittext(uintptr_t addr)
arch/riscv/kernel/patch.c:              addr >= (uintptr_t)__exittext_begin &&
arch/riscv/kernel/patch.c:              addr < (uintptr_t)__exittext_end;
arch/riscv/kernel/patch.c:      if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
arch/riscv/kernel/vmlinux-xip.lds.S:    __exittext_begin = .;
arch/riscv/kernel/vmlinux-xip.lds.S:    __exittext_end = .;
arch/riscv/kernel/vmlinux.lds.S:        __exittext_begin = .;
arch/riscv/kernel/vmlinux.lds.S:        __exittext_end = .;


>   
>   /* Patch sites */
>   extern s32 patch__call_flush_branch_caches1;
> diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
> index 82010629cf88..b5efd8d7bc01 100644
> --- a/arch/powerpc/kernel/trace/ftrace.c
> +++ b/arch/powerpc/kernel/trace/ftrace.c
> @@ -27,10 +27,22 @@
>   #include <asm/ftrace.h>
>   #include <asm/syscall.h>
>   #include <asm/inst.h>
> +#include <asm/sections.h>
>   
>   #define	NUM_FTRACE_TRAMPS	2
>   static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
>   
> +unsigned long ftrace_call_adjust(unsigned long addr)
> +{
> +	if (addr >= (unsigned long)_sexittext && addr < (unsigned long)_eexittext)
> +		return 0;

Also, arm64 has a function called is_exit_text() and riscv has 
is_kernel_exittext(). Can we refactor?

> +
> +	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
> +		addr += MCOUNT_INSN_SIZE;
> +
> +	return addr;
> +}
> +
>   static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
>   {
>   	ppc_inst_t op;
> diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
> index 1c5970df3233..9c376ae6857d 100644
> --- a/arch/powerpc/kernel/vmlinux.lds.S
> +++ b/arch/powerpc/kernel/vmlinux.lds.S
> @@ -281,7 +281,9 @@ SECTIONS
>   	 * to deal with references from __bug_table
>   	 */
>   	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
> +		_sexittext = .;
>   		EXIT_TEXT
> +		_eexittext = .;
>   	}
>   
>   	. = ALIGN(PAGE_SIZE);
> 
> base-commit: 4ef8376c466ae8b03e632dd8eca1e44315f7dd61
Michael Ellerman Feb. 13, 2024, 1:51 a.m. UTC | #2
Christophe Leroy <christophe.leroy@csgroup.eu> writes:
> On 09/02/2024 at 08:59, Naveen N Rao wrote:
>> Michael reported that we are seeing ftrace bug on bootup when KASAN is
>> enabled, and if we are using -fpatchable-function-entry:
>> 
...
>> diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
>> index ea26665f82cf..d389dcecdb0b 100644
>> --- a/arch/powerpc/include/asm/sections.h
>> +++ b/arch/powerpc/include/asm/sections.h
>> @@ -14,6 +14,7 @@ typedef struct func_desc func_desc_t;
>>   
>>   extern char __head_end[];
>>   extern char __srwx_boundary[];
>> +extern char _sexittext[], _eexittext[];
>
> Should we try to at least use the same symbols as others, or best try to 
> move this into include/asm-generic/sections.h, just like inittext ?
>
> $ git grep exittext
> arch/arm64/include/asm/sections.h:extern char __exittext_begin[], __exittext_end[];
> arch/arm64/kernel/patching.c:           addr >= (unsigned long)__exittext_begin &&
> arch/arm64/kernel/patching.c:           addr < (unsigned long)__exittext_end;
> arch/arm64/kernel/vmlinux.lds.S:        __exittext_begin = .;
> arch/arm64/kernel/vmlinux.lds.S:        __exittext_end = .;
> arch/riscv/include/asm/sections.h:extern char __exittext_begin[], __exittext_end[];
> arch/riscv/kernel/patch.c:static inline bool is_kernel_exittext(uintptr_t addr)
> arch/riscv/kernel/patch.c:              addr >= (uintptr_t)__exittext_begin &&
> arch/riscv/kernel/patch.c:              addr < (uintptr_t)__exittext_end;
> arch/riscv/kernel/patch.c:      if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
> arch/riscv/kernel/vmlinux-xip.lds.S:    __exittext_begin = .;
> arch/riscv/kernel/vmlinux-xip.lds.S:    __exittext_end = .;
> arch/riscv/kernel/vmlinux.lds.S:        __exittext_begin = .;
> arch/riscv/kernel/vmlinux.lds.S:        __exittext_end = .;

I'll change it to use __exittext_begin/end.

>> diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
>> index 82010629cf88..b5efd8d7bc01 100644
>> --- a/arch/powerpc/kernel/trace/ftrace.c
>> +++ b/arch/powerpc/kernel/trace/ftrace.c
>> @@ -27,10 +27,22 @@
>>   #include <asm/ftrace.h>
>>   #include <asm/syscall.h>
>>   #include <asm/inst.h>
>> +#include <asm/sections.h>
>>   
>>   #define	NUM_FTRACE_TRAMPS	2
>>   static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
>>   
>> +unsigned long ftrace_call_adjust(unsigned long addr)
>> +{
>> +	if (addr >= (unsigned long)_sexittext && addr < (unsigned long)_eexittext)
>> +		return 0;
>
> Then arm64 has a function called is_exit_text() and riscv has 
> is_kernel_exittext(). Can we refactor ?

I'd like to get the fix in and backported, so I'll take it as-is but
with the section names changed to match the other arches.

We can do further refactoring on top.

cheers
Benjamin Gray Feb. 13, 2024, 4:20 a.m. UTC | #3
On Fri, 2024-02-09 at 13:29 +0530, Naveen N Rao wrote:
> Michael reported that we are seeing ftrace bug on bootup when KASAN
> is
> enabled, and if we are using -fpatchable-function-entry:
> 
>     ftrace: allocating 47780 entries in 18 pages
>     ftrace-powerpc: 0xc0000000020b3d5c: No module provided for non-
> kernel address
>     ------------[ ftrace bug ]------------
>     ftrace faulted on modifying
>     [<c0000000020b3d5c>] 0xc0000000020b3d5c
>     Initializing ftrace call sites
>     ftrace record flags: 0
>      (0)
>      expected tramp: c00000000008cef4
>     ------------[ cut here ]------------
>     WARNING: CPU: 0 PID: 0 at kernel/trace/ftrace.c:2180
> ftrace_bug+0x3c0/0x424
>     Modules linked in:
>     CPU: 0 PID: 0 Comm: swapper Not tainted 6.5.0-rc3-00120-
> g0f71dcfb4aef #860
>     Hardware name: IBM pSeries (emulated by qemu) POWER9 (raw)
> 0x4e1202 0xf000005 of:SLOF,HEAD hv:linux,kvm pSeries
>     NIP:  c0000000003aa81c LR: c0000000003aa818 CTR: 0000000000000000
>     REGS: c0000000033cfab0 TRAP: 0700   Not tainted  (6.5.0-rc3-
> 00120-g0f71dcfb4aef)
>     MSR:  8000000002021033 <SF,VEC,ME,IR,DR,RI,LE>  CR: 28028240 
> XER: 00000000
>     CFAR: c0000000002781a8 IRQMASK: 3
>     ...
>     NIP [c0000000003aa81c] ftrace_bug+0x3c0/0x424
>     LR [c0000000003aa818] ftrace_bug+0x3bc/0x424
>     Call Trace:
>      ftrace_bug+0x3bc/0x424 (unreliable)
>      ftrace_process_locs+0x5f4/0x8a0
>      ftrace_init+0xc0/0x1d0
>      start_kernel+0x1d8/0x484
> 
> With CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y and
> CONFIG_KASAN=y, compiler emits nops in functions that it generates
> for
> registering and unregistering global variables (unlike with -pg and
> -mprofile-kernel where calls to _mcount() are not generated in those
> functions). Those functions then end up in INIT_TEXT and EXIT_TEXT
> respectively. We don't expect to see any profiled functions in
> EXIT_TEXT, so ftrace_init_nop() assumes that all addresses that
> aren't
> in the core kernel text belongs to a module. Since these functions do
> not match that criteria, we see the above bug.
> 
> Address this by having ftrace ignore all locations in the text exit
> sections of vmlinux.
> 
> Fixes: 0f71dcfb4aef ("powerpc/ftrace: Add support for -fpatchable-
> function-entry")
> Cc: stable@vger.kernel.org
> Reported-by: Michael Ellerman <mpe@ellerman.id.au>
> Signed-off-by: Naveen N Rao <naveen@kernel.org>
> ---
>  arch/powerpc/include/asm/ftrace.h   |  9 +--------
>  arch/powerpc/include/asm/sections.h |  1 +
>  arch/powerpc/kernel/trace/ftrace.c  | 12 ++++++++++++
>  arch/powerpc/kernel/vmlinux.lds.S   |  2 ++
>  4 files changed, 16 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/ftrace.h
> b/arch/powerpc/include/asm/ftrace.h
> index 1ebd2ca97f12..d6babd083202 100644
> --- a/arch/powerpc/include/asm/ftrace.h
> +++ b/arch/powerpc/include/asm/ftrace.h
> @@ -20,14 +20,7 @@
>  #ifndef __ASSEMBLY__
>  extern void _mcount(void);
>  
> -static inline unsigned long ftrace_call_adjust(unsigned long addr)
> -{
> -	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
> -		addr += MCOUNT_INSN_SIZE;
> -
> -	return addr;
> -}
> -
> +unsigned long ftrace_call_adjust(unsigned long addr);
>  unsigned long prepare_ftrace_return(unsigned long parent, unsigned
> long ip,
>  				    unsigned long sp);
>  
> diff --git a/arch/powerpc/include/asm/sections.h
> b/arch/powerpc/include/asm/sections.h
> index ea26665f82cf..d389dcecdb0b 100644
> --- a/arch/powerpc/include/asm/sections.h
> +++ b/arch/powerpc/include/asm/sections.h
> @@ -14,6 +14,7 @@ typedef struct func_desc func_desc_t;
>  
>  extern char __head_end[];
>  extern char __srwx_boundary[];
> +extern char _sexittext[], _eexittext[];
>  
>  /* Patch sites */
>  extern s32 patch__call_flush_branch_caches1;
> diff --git a/arch/powerpc/kernel/trace/ftrace.c
> b/arch/powerpc/kernel/trace/ftrace.c
> index 82010629cf88..b5efd8d7bc01 100644
> --- a/arch/powerpc/kernel/trace/ftrace.c
> +++ b/arch/powerpc/kernel/trace/ftrace.c
> @@ -27,10 +27,22 @@
>  #include <asm/ftrace.h>
>  #include <asm/syscall.h>
>  #include <asm/inst.h>
> +#include <asm/sections.h>
>  
>  #define	NUM_FTRACE_TRAMPS	2
>  static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
>  
> +unsigned long ftrace_call_adjust(unsigned long addr)
> +{
> +	if (addr >= (unsigned long)_sexittext && addr < (unsigned
> long)_eexittext)
> +		return 0;
> +
> +	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
> +		addr += MCOUNT_INSN_SIZE;
> +
> +	return addr;
> +}
> +
>  static ppc_inst_t ftrace_create_branch_inst(unsigned long ip,
> unsigned long addr, int link)
>  {
>  	ppc_inst_t op;
> diff --git a/arch/powerpc/kernel/vmlinux.lds.S
> b/arch/powerpc/kernel/vmlinux.lds.S
> index 1c5970df3233..9c376ae6857d 100644
> --- a/arch/powerpc/kernel/vmlinux.lds.S
> +++ b/arch/powerpc/kernel/vmlinux.lds.S
> @@ -281,7 +281,9 @@ SECTIONS
>  	 * to deal with references from __bug_table
>  	 */
>  	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
> +		_sexittext = .;
>  		EXIT_TEXT
> +		_eexittext = .;
>  	}
>  
>  	. = ALIGN(PAGE_SIZE);
> 
> base-commit: 4ef8376c466ae8b03e632dd8eca1e44315f7dd61

Reviewed-by: Benjamin Gray <bgray@linux.ibm.com>

Fixes what I assume is the same bug I was seeing for a while:

[    0.000000][    T0] ftrace-powerpc: 0xc0000000030f4f38: No module provided for non-kernel address
[    0.000000][    T0] ------------[ ftrace bug ]------------
[    0.000000][    T0] ftrace faulted on modifying 
[    0.000000][    T0] [<c0000000030f4f38>] _sub_D_65535_0+0xc/0x40
[    0.000000][    T0] Initializing ftrace call sites
[    0.000000][    T0] ftrace record flags: 0
[    0.000000][    T0]  (0)    
[    0.000000][    T0]  expected tramp: c0000000000a8548
[    0.000000][    T0] ------------[ cut here ]------------
[    0.000000][    T0] WARNING: CPU: 0 PID: 0 at kernel/trace/ftrace.c:2179 ftrace_bug+0x16c/0x3a0
[    0.000000][    T0] Modules linked in:
[    0.000000][    T0] CPU: 0 PID: 0 Comm: swapper Not tainted 6.8.0-rc3-02361-g892886f9f5b7-dirty #11
[    0.000000][    T0] Hardware name: IBM pSeries (emulated by qemu) POWER9 (raw) 0x4e1202 0xf000005 of:SLOF,HEAD hv:linux,kvm pSeries
[    0.000000][    T0] NIP:  c00000000048a96c LR: c00000000048a968 CTR: 0000000000000000
[    0.000000][    T0] REGS: c000000004fa7ae0 TRAP: 0700   Not tainted  (6.8.0-rc3-02361-g892886f9f5b7-dirty)
[    0.000000][    T0] MSR:  8000000002021033 <SF,VEC,ME,IR,DR,RI,LE>  CR: 28028440  XER: 00000000
[    0.000000][    T0] CFAR: c00000000031db34 IRQMASK: 3 
[    0.000000][    T0] GPR00: c00000000048a968 c000000004fa7d80 c000000002645600 0000000000000022 
[    0.000000][    T0] GPR04: 0000000000000008 0000000000000001 c000000000326dc4 0000000000000000 
[    0.000000][    T0] GPR08: c000000002535600 0000000000000001 c00000000440bee0 0000000048028444 
[    0.000000][    T0] GPR12: 0000000000000000 c000000006250000 0000000000000000 0000000000000000 
[    0.000000][    T0] GPR16: 0000000000000000 0000000000000003 0000000000000001 0000000000000000 
[    0.000000][    T0] GPR20: c000000004fe8af8 c000000010030428 c0000000045b90c0 0000000000000000 
[    0.000000][    T0] GPR24: 0000000000000000 c000000010030430 c000000010030420 0000000000000000 
[    0.000000][    T0] GPR28: c000000001dd9a40 c000000001dd8bc0 c0000000102a5b00 c0000000102a5b08 
[    0.000000][    T0] NIP [c00000000048a96c] ftrace_bug+0x16c/0x3a0
[    0.000000][    T0] LR [c00000000048a968] ftrace_bug+0x168/0x3a0
[    0.000000][    T0] Call Trace:
[    0.000000][    T0] [c000000004fa7d80] [c00000000048a968] ftrace_bug+0x168/0x3a0 (unreliable)
[    0.000000][    T0] [c000000004fa7e20] [c00000000048aeec] ftrace_process_locs+0x34c/0x780
[    0.000000][    T0] [c000000004fa7ec0] [c0000000030655c0] ftrace_init+0xc8/0x27c
[    0.000000][    T0] [c000000004fa7f40] [c00000000300d458] start_kernel+0x1bc/0x528
[    0.000000][    T0] [c000000004fa7fe0] [c00000000000eaa0] start_here_common+0x1c/0x20
[    0.000000][    T0] Code: 7fe3fb78 483a31e5 60000000 e93e0008 75290800 40820150 7fc3f378 4bfffc95 7c641b78 387d0f40 4be93189 60000000 <0fe00000> 382100a0 3d22fda6 39000001 
[    0.000000][    T0] irq event stamp: 0
[    0.000000][    T0] hardirqs last  enabled at (0): [<0000000000000000>] 0x0
[    0.000000][    T0] hardirqs last disabled at (0): [<0000000000000000>] 0x0
[    0.000000][    T0] softirqs last  enabled at (0): [<0000000000000000>] 0x0
[    0.000000][    T0] softirqs last disabled at (0): [<0000000000000000>] 0x0
[    0.000000][    T0] ---[ end trace 0000000000000000 ]---
Naveen N Rao Feb. 13, 2024, 5:30 a.m. UTC | #4
On Mon, Feb 12, 2024 at 07:31:03PM +0000, Christophe Leroy wrote:
> 
> 
> On 09/02/2024 at 08:59, Naveen N Rao wrote:
> > diff --git a/arch/powerpc/include/asm/sections.h 
> > b/arch/powerpc/include/asm/sections.h
> > index ea26665f82cf..d389dcecdb0b 100644
> > --- a/arch/powerpc/include/asm/sections.h
> > +++ b/arch/powerpc/include/asm/sections.h
> > @@ -14,6 +14,7 @@ typedef struct func_desc func_desc_t;
> >   
> >   extern char __head_end[];
> >   extern char __srwx_boundary[];
> > +extern char _sexittext[], _eexittext[];
> 
> Should we try to at least use the same symbols as others, or best try to 
> move this into include/asm-generic/sections.h, just like inittext ?

I used this name based on what is used for init text start and end in 
the generic code: _sinittext and _einittext.

> 
> $ git grep exittext
> arch/arm64/include/asm/sections.h:extern char __exittext_begin[], 
> __exittext_end[];

Arm64 also uses the non-standard __inittext_begin/__inittext_end, so it 
looks to be something very specific to arm64.

I do agree it would be good to refactor and unify names across 
architectures.
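
For illustration only (this is not part of the patch): a unified helper shared 
via include/asm-generic/sections.h might look roughly like the sketch below. 
The helper name and placement are assumptions, modelled on the existing 
arm64/riscv symbol names.

    /* Hypothetical sketch, e.g. in include/asm-generic/sections.h */
    extern char __exittext_begin[], __exittext_end[];

    static inline bool is_kernel_exittext(unsigned long addr)
    {
            return addr >= (unsigned long)__exittext_begin &&
                   addr < (unsigned long)__exittext_end;
    }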


- Naveen

Patch

diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 1ebd2ca97f12..d6babd083202 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -20,14 +20,7 @@ 
 #ifndef __ASSEMBLY__
 extern void _mcount(void);
 
-static inline unsigned long ftrace_call_adjust(unsigned long addr)
-{
-	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
-		addr += MCOUNT_INSN_SIZE;
-
-	return addr;
-}
-
+unsigned long ftrace_call_adjust(unsigned long addr);
 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
 				    unsigned long sp);
 
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index ea26665f82cf..d389dcecdb0b 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -14,6 +14,7 @@  typedef struct func_desc func_desc_t;
 
 extern char __head_end[];
 extern char __srwx_boundary[];
+extern char _sexittext[], _eexittext[];
 
 /* Patch sites */
 extern s32 patch__call_flush_branch_caches1;
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 82010629cf88..b5efd8d7bc01 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -27,10 +27,22 @@ 
 #include <asm/ftrace.h>
 #include <asm/syscall.h>
 #include <asm/inst.h>
+#include <asm/sections.h>
 
 #define	NUM_FTRACE_TRAMPS	2
 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
 
+unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	if (addr >= (unsigned long)_sexittext && addr < (unsigned long)_eexittext)
+		return 0;
+
+	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
+		addr += MCOUNT_INSN_SIZE;
+
+	return addr;
+}
+
 static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
 {
 	ppc_inst_t op;
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 1c5970df3233..9c376ae6857d 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -281,7 +281,9 @@  SECTIONS
 	 * to deal with references from __bug_table
 	 */
 	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
+		_sexittext = .;
 		EXIT_TEXT
+		_eexittext = .;
 	}
 
 	. = ALIGN(PAGE_SIZE);