
[2/4] powerpc/kuap: Avoid useless jump_label on empty function

Message ID c2c9660cc4441dc37c477ce5cf60707c971bd2a1.1685963081.git.christophe.leroy@csgroup.eu (mailing list archive)
State Superseded
Series [1/4] powerpc/kuap: Avoid unnecessary reads of MD_AP

Commit Message

Christophe Leroy June 5, 2023, 11:04 a.m. UTC
Disassembly of interrupt_enter_prepare() shows a pointless nop before the mftb

  c000abf0 <interrupt_enter_prepare>:
  c000abf0:       81 23 00 84     lwz     r9,132(r3)
  c000abf4:       71 29 40 00     andi.   r9,r9,16384
  c000abf8:       41 82 00 28     beq-    c000ac20 <interrupt_enter_prepare+0x30>
  c000abfc: ===>  60 00 00 00     nop	<====
  c000ac00:       7d 0c 42 e6     mftb    r8
  c000ac04:       80 e2 00 08     lwz     r7,8(r2)
  c000ac08:       81 22 00 28     lwz     r9,40(r2)
  c000ac0c:       91 02 00 24     stw     r8,36(r2)
  c000ac10:       7d 29 38 50     subf    r9,r9,r7
  c000ac14:       7d 29 42 14     add     r9,r9,r8
  c000ac18:       91 22 00 08     stw     r9,8(r2)
  c000ac1c:       4e 80 00 20     blr
  c000ac20:       60 00 00 00     nop
  c000ac24:       7d 5a c2 a6     mfmd_ap r10
  c000ac28:       3d 20 de 00     lis     r9,-8704
  c000ac2c:       91 43 00 b0     stw     r10,176(r3)
  c000ac30:       7d 3a c3 a6     mtspr   794,r9
  c000ac34:       4e 80 00 20     blr

That comes from the call to kuap_lock(), although __kuap_lock() is an
empty function on the 8xx.

To avoid that, only perform the kuap_is_disabled() check when
__kuap_lock() actually does something.

Do the same with __kuap_save_and_lock() and __kuap_get_and_assert_locked().
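
The mechanism is the usual conditional-hook pattern: an architecture that
implements a hook also defines a same-named macro, and the generic wrapper
only emits the kuap_is_disabled() check inside an #ifdef on that macro. A
minimal standalone sketch of the pattern (the hook names here are
illustrative, not the ones used by the patch):

  #include <stdio.h>

  /* "Arch" side: only architectures with real work provide the hook, and
   * advertise it by defining a macro with the same name.
   */
  static inline void arch_hook(void)
  {
          printf("arch hook ran\n");
  }
  #define arch_hook arch_hook

  /* "Generic" side: when no arch_hook macro is defined, the whole body,
   * including the runtime enable/disable check, disappears at
   * preprocessing time, so no jump_label nop is emitted for it.
   */
  static inline void generic_hook(int disabled)
  {
  #ifdef arch_hook
          if (disabled)
                  return;
          arch_hook();
  #endif
  }

  int main(void)
  {
          generic_hook(0);        /* calls arch_hook() */
          generic_hook(1);        /* early return, hook skipped */
          return 0;
  }

With the macro left undefined (the empty-function case), generic_hook()
compiles down to nothing, which is what the patch arranges for
__kuap_lock() on the 8xx.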

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/powerpc/include/asm/book3s/32/kup.h     |  6 ++---
 arch/powerpc/include/asm/book3s/64/kup.h     | 10 +-------
 arch/powerpc/include/asm/kup.h               | 25 ++++++++++----------
 arch/powerpc/include/asm/nohash/32/kup-8xx.h | 11 ++++-----
 arch/powerpc/include/asm/nohash/kup-booke.h  |  8 +++++--
 5 files changed, 26 insertions(+), 34 deletions(-)

Comments

Nicholas Piggin June 6, 2023, 9:12 a.m. UTC | #1
On Mon Jun 5, 2023 at 9:04 PM AEST, Christophe Leroy wrote:
> Disassembly of interrupt_enter_prepare() shows a pointless nop before the mftb
>
>   c000abf0 <interrupt_enter_prepare>:
>   c000abf0:       81 23 00 84     lwz     r9,132(r3)
>   c000abf4:       71 29 40 00     andi.   r9,r9,16384
>   c000abf8:       41 82 00 28     beq-    c000ac20 <interrupt_enter_prepare+0x30>
>   c000abfc: ===>  60 00 00 00     nop	<====
>   c000ac00:       7d 0c 42 e6     mftb    r8
>   c000ac04:       80 e2 00 08     lwz     r7,8(r2)
>   c000ac08:       81 22 00 28     lwz     r9,40(r2)
>   c000ac0c:       91 02 00 24     stw     r8,36(r2)
>   c000ac10:       7d 29 38 50     subf    r9,r9,r7
>   c000ac14:       7d 29 42 14     add     r9,r9,r8
>   c000ac18:       91 22 00 08     stw     r9,8(r2)
>   c000ac1c:       4e 80 00 20     blr
>   c000ac20:       60 00 00 00     nop
>   c000ac24:       7d 5a c2 a6     mfmd_ap r10
>   c000ac28:       3d 20 de 00     lis     r9,-8704
>   c000ac2c:       91 43 00 b0     stw     r10,176(r3)
>   c000ac30:       7d 3a c3 a6     mtspr   794,r9
>   c000ac34:       4e 80 00 20     blr
>
> That comes from the call to kuap_lock(), although __kuap_lock() is an
> empty function on the 8xx.
>
> To avoid that, only perform the kuap_is_disabled() check when
> __kuap_lock() actually does something.
>
> Do the same with __kuap_save_and_lock() and __kuap_get_and_assert_locked().

Too bad static branch nops can't be eliminated.
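
For context on the remark above: kuap_is_disabled() is backed by a static
key, and each static_branch_unlikely() site leaves a patchable nop in the
instruction stream even while the key stays false. A kernel-style sketch of
that mechanism (the names match the kup-8xx.h hunk below; the snippet is
illustrative, not part of the patch):

  #include <linux/jump_label.h>

  /* Key defaults to false: the check below compiles to straight-line code
   * plus a single nop at the branch site.
   */
  DEFINE_STATIC_KEY_FALSE(disable_kuap_key);

  static __always_inline bool kuap_is_disabled(void)
  {
          /* The nop is live-patched into a jump only if someone later calls
           * static_branch_enable(&disable_kuap_key); otherwise it stays put,
           * which is the kind of nop highlighted in the disassembly above.
           */
          return static_branch_unlikely(&disable_kuap_key);
  }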

> diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
> index 54cf46808157..1b0215ff3710 100644
> --- a/arch/powerpc/include/asm/book3s/64/kup.h
> +++ b/arch/powerpc/include/asm/book3s/64/kup.h
> @@ -297,15 +297,7 @@ static inline unsigned long __kuap_get_and_assert_locked(void)
>  		WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
>  	return amr;
>  }
> -
> -/* Do nothing, book3s/64 does that in ASM */
> -static inline void __kuap_lock(void)
> -{
> -}
> -
> -static inline void __kuap_save_and_lock(struct pt_regs *regs)
> -{
> -}
> +#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked

Maybe leave in /* __kuap_lock not required, book3s/64 does that in ASM */
? Seems okay though

Reviewed-by: Nicholas Piggin <npiggin@gmail.com>

Thanks,
Nick

Patch

diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 678f9c9d89b6..466a19cfb4df 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -77,10 +77,6 @@  static inline void kuap_unlock(unsigned long addr, bool ool)
 		kuap_unlock_all_ool();
 }
 
-static inline void __kuap_lock(void)
-{
-}
-
 static inline void __kuap_save_and_lock(struct pt_regs *regs)
 {
 	unsigned long kuap = current->thread.kuap;
@@ -92,6 +88,7 @@  static inline void __kuap_save_and_lock(struct pt_regs *regs)
 	current->thread.kuap = KUAP_NONE;
 	kuap_lock_addr(kuap, false);
 }
+#define __kuap_save_and_lock __kuap_save_and_lock
 
 static inline void kuap_user_restore(struct pt_regs *regs)
 {
@@ -120,6 +117,7 @@  static inline unsigned long __kuap_get_and_assert_locked(void)
 
 	return kuap;
 }
+#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
 
 static __always_inline void __allow_user_access(void __user *to, const void __user *from,
 						u32 size, unsigned long dir)
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 54cf46808157..1b0215ff3710 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -297,15 +297,7 @@  static inline unsigned long __kuap_get_and_assert_locked(void)
 		WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
 	return amr;
 }
-
-/* Do nothing, book3s/64 does that in ASM */
-static inline void __kuap_lock(void)
-{
-}
-
-static inline void __kuap_save_and_lock(struct pt_regs *regs)
-{
-}
+#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
 
 /*
  * We support individually allowing read or write, but we don't support nesting
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index d751ddd08110..0a4e07175612 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -52,16 +52,9 @@  __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 	return false;
 }
 
-static inline void __kuap_lock(void) { }
-static inline void __kuap_save_and_lock(struct pt_regs *regs) { }
 static inline void kuap_user_restore(struct pt_regs *regs) { }
 static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
 
-static inline unsigned long __kuap_get_and_assert_locked(void)
-{
-	return 0;
-}
-
 /*
  * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
  * the L1D cache after user accesses. Only include the empty stubs for other
@@ -87,27 +80,32 @@  bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 
 static __always_inline void kuap_assert_locked(void)
 {
+#if defined(CONFIG_PPC_KUAP_DEBUG) && defined(__kuap_get_and_assert_locked)
 	if (kuap_is_disabled())
 		return;
 
-	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
-		__kuap_get_and_assert_locked();
+	__kuap_get_and_assert_locked();
+#endif
 }
 
 static __always_inline void kuap_lock(void)
 {
+#ifdef __kuap_lock
 	if (kuap_is_disabled())
 		return;
 
 	__kuap_lock();
+#endif
 }
 
 static __always_inline void kuap_save_and_lock(struct pt_regs *regs)
 {
+#ifdef __kuap_save_and_lock
 	if (kuap_is_disabled())
 		return;
 
 	__kuap_save_and_lock(regs);
+#endif
 }
 
 static __always_inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
@@ -120,10 +118,11 @@  static __always_inline void kuap_kernel_restore(struct pt_regs *regs, unsigned l
 
 static __always_inline unsigned long kuap_get_and_assert_locked(void)
 {
-	if (kuap_is_disabled())
-		return 0;
-
-	return __kuap_get_and_assert_locked();
+#ifdef __kuap_get_and_assert_locked
+	if (!kuap_is_disabled())
+		return __kuap_get_and_assert_locked();
+#endif
+	return 0;
 }
 
 #ifndef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 8579210f2a6a..a372cd822887 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -20,15 +20,12 @@  static __always_inline bool kuap_is_disabled(void)
 	return static_branch_unlikely(&disable_kuap_key);
 }
 
-static inline void __kuap_lock(void)
-{
-}
-
 static inline void __kuap_save_and_lock(struct pt_regs *regs)
 {
 	regs->kuap = mfspr(SPRN_MD_AP);
 	mtspr(SPRN_MD_AP, MD_APG_KUAP);
 }
+#define __kuap_save_and_lock __kuap_save_and_lock
 
 static inline void kuap_user_restore(struct pt_regs *regs)
 {
@@ -39,13 +36,15 @@  static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kua
 	mtspr(SPRN_MD_AP, regs->kuap);
 }
 
+#ifdef CONFIG_PPC_KUAP_DEBUG
 static inline unsigned long __kuap_get_and_assert_locked(void)
 {
-	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
-		WARN_ON_ONCE(mfspr(SPRN_MD_AP) >> 16 != MD_APG_KUAP >> 16);
+	WARN_ON_ONCE(mfspr(SPRN_MD_AP) >> 16 != MD_APG_KUAP >> 16);
 
 	return 0;
 }
+#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
+#endif
 
 static inline void __allow_user_access(void __user *to, const void __user *from,
 				       unsigned long size, unsigned long dir)
diff --git a/arch/powerpc/include/asm/nohash/kup-booke.h b/arch/powerpc/include/asm/nohash/kup-booke.h
index 823c5a3a96d8..71182cbe20c3 100644
--- a/arch/powerpc/include/asm/nohash/kup-booke.h
+++ b/arch/powerpc/include/asm/nohash/kup-booke.h
@@ -30,6 +30,7 @@  static inline void __kuap_lock(void)
 	mtspr(SPRN_PID, 0);
 	isync();
 }
+#define __kuap_lock __kuap_lock
 
 static inline void __kuap_save_and_lock(struct pt_regs *regs)
 {
@@ -37,6 +38,7 @@  static inline void __kuap_save_and_lock(struct pt_regs *regs)
 	mtspr(SPRN_PID, 0);
 	isync();
 }
+#define __kuap_save_and_lock __kuap_save_and_lock
 
 static inline void kuap_user_restore(struct pt_regs *regs)
 {
@@ -56,13 +58,15 @@  static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kua
 	/* Context synchronisation is performed by rfi */
 }
 
+#ifdef CONFIG_PPC_KUAP_DEBUG
 static inline unsigned long __kuap_get_and_assert_locked(void)
 {
-	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
-		WARN_ON_ONCE(mfspr(SPRN_PID));
+	WARN_ON_ONCE(mfspr(SPRN_PID));
 
 	return 0;
 }
+#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
+#endif
 
 static inline void __allow_user_access(void __user *to, const void __user *from,
 				       unsigned long size, unsigned long dir)