[v5,2/3] aarch64: Set the syscall register right before doing the syscall.

Message ID 20230419135821.4113156-3-josimmon@redhat.com
State New
Series x86_64: aarch64: Set call number just before syscall

Commit Message

Joe Simmons-Talbott April 19, 2023, 1:58 p.m. UTC
To make identifying syscalls easier during call tree analysis, load the
syscall number just before performing the syscall.

Compiler optimizations can place quite a few instructions between the
setting of the syscall number and the syscall instruction.  During call
tree analysis, those intervening instructions make it more difficult for
both tools and humans to correctly identify the syscall number.  Setting
the syscall number in the instruction immediately preceding the syscall
instruction makes this task easier and less error prone.
Being able to reliably identify syscalls made by a given API will make
it easier to understand and verify the safety and security of glibc.
---
 sysdeps/unix/sysv/linux/aarch64/sysdep.h | 29 ++++++++++++++++--------
 1 file changed, 20 insertions(+), 9 deletions(-)
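
[Editorial note] For readers skimming the thread, a minimal, self-contained
sketch of the pattern the patch's constant-number branch implements may help.
This is illustrative only, not the glibc macro itself; it assumes an
AArch64/Linux target and uses getpid purely as an example:

/* Sketch: fold the syscall-number load into the same asm statement as
   the svc, so "mov x8, <nr>" is always the instruction directly before
   "svc 0" in the emitted code.  Hypothetical helper, not part of the
   patch; AArch64/Linux only.  */
#include <asm/unistd.h>

static inline long
raw_getpid (void)
{
  register long x0 asm ("x0");
  asm volatile ("mov	x8, %1	// syscall getpid\n"
		"svc	0"
		: "=r" (x0)
		: "i" (__NR_getpid)
		: "x8", "memory");
  return x0;
}

Because the mov is part of the asm template, the compiler cannot schedule
unrelated instructions between it and the svc, which is exactly the property
the commit message asks for.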

Comments

Szabolcs Nagy April 19, 2023, 2:56 p.m. UTC | #1
The 04/19/2023 09:58, Joe Simmons-Talbott via Libc-alpha wrote:
> --- a/sysdeps/unix/sysv/linux/aarch64/sysdep.h
> +++ b/sysdeps/unix/sysv/linux/aarch64/sysdep.h
> @@ -168,15 +168,26 @@
>  # define HAVE_CLONE3_WRAPPER		1
>  
>  # undef INTERNAL_SYSCALL_RAW
> -# define INTERNAL_SYSCALL_RAW(name, nr, args...)		\
> -  ({ long _sys_result;						\
> -     {								\
> -       LOAD_ARGS_##nr (args)					\
> -       register long _x8 asm ("x8") = (name);			\
> -       asm volatile ("svc	0	// syscall " # name     \
> -		     : "=r" (_x0) : "r"(_x8) ASM_ARGS_##nr : "memory");	\
> -       _sys_result = _x0;					\
> -     }								\
> +# define INTERNAL_SYSCALL_RAW(name, nr, args...)			\
> +  ({ long _sys_result;							\
> +     {									\
> +       LOAD_ARGS_##nr (args)						\
> +       if (__builtin_constant_p(name))					\
> +         asm volatile ("mov	x8, %1	// syscall " # name "\n"	\
> +		       "svc	0"					\
> +                       : "=r" (_x0)					\
> +		       : "i" (name) ASM_ARGS_##nr			\
> +		       : "x8", "memory");				\
> +       else								\
> +         {								\
> +           register long _x8 asm ("x8") = (name);			\
> +           asm volatile ("svc	0\n\t"					\

why \n\t ?

i don't think that's needed.

> +		         : "=r" (_x0)					\
> +		         : "r"(_x8) ASM_ARGS_##nr			\
> +		         : "memory");					\
> +         }								\
> +       _sys_result = _x0;						\
> +     }									\
>       _sys_result; })
>  
>  # undef INTERNAL_SYSCALL
Joe Simmons-Talbott April 19, 2023, 3:21 p.m. UTC | #2
On Wed, Apr 19, 2023 at 03:56:05PM +0100, Szabolcs Nagy wrote:
> The 04/19/2023 09:58, Joe Simmons-Talbott via Libc-alpha wrote:
> > --- a/sysdeps/unix/sysv/linux/aarch64/sysdep.h
> > +++ b/sysdeps/unix/sysv/linux/aarch64/sysdep.h
> > @@ -168,15 +168,26 @@
> >  # define HAVE_CLONE3_WRAPPER		1
> >  
> >  # undef INTERNAL_SYSCALL_RAW
> > -# define INTERNAL_SYSCALL_RAW(name, nr, args...)		\
> > -  ({ long _sys_result;						\
> > -     {								\
> > -       LOAD_ARGS_##nr (args)					\
> > -       register long _x8 asm ("x8") = (name);			\
> > -       asm volatile ("svc	0	// syscall " # name     \
> > -		     : "=r" (_x0) : "r"(_x8) ASM_ARGS_##nr : "memory");	\
> > -       _sys_result = _x0;					\
> > -     }								\
> > +# define INTERNAL_SYSCALL_RAW(name, nr, args...)			\
> > +  ({ long _sys_result;							\
> > +     {									\
> > +       LOAD_ARGS_##nr (args)						\
> > +       if (__builtin_constant_p(name))					\
> > +         asm volatile ("mov	x8, %1	// syscall " # name "\n"	\
> > +		       "svc	0"					\
> > +                       : "=r" (_x0)					\
> > +		       : "i" (name) ASM_ARGS_##nr			\
> > +		       : "x8", "memory");				\
> > +       else								\
> > +         {								\
> > +           register long _x8 asm ("x8") = (name);			\
> > +           asm volatile ("svc	0\n\t"					\
> 
> why \n\t ?
> 
> i don't think that's needed.

I'll remove it in v6.

Thanks,
Joe

Patch

diff --git a/sysdeps/unix/sysv/linux/aarch64/sysdep.h b/sysdeps/unix/sysv/linux/aarch64/sysdep.h
index e94d1703ad..6fe40aaf89 100644
--- a/sysdeps/unix/sysv/linux/aarch64/sysdep.h
+++ b/sysdeps/unix/sysv/linux/aarch64/sysdep.h
@@ -168,15 +168,26 @@ 
 # define HAVE_CLONE3_WRAPPER		1
 
 # undef INTERNAL_SYSCALL_RAW
-# define INTERNAL_SYSCALL_RAW(name, nr, args...)		\
-  ({ long _sys_result;						\
-     {								\
-       LOAD_ARGS_##nr (args)					\
-       register long _x8 asm ("x8") = (name);			\
-       asm volatile ("svc	0	// syscall " # name     \
-		     : "=r" (_x0) : "r"(_x8) ASM_ARGS_##nr : "memory");	\
-       _sys_result = _x0;					\
-     }								\
+# define INTERNAL_SYSCALL_RAW(name, nr, args...)			\
+  ({ long _sys_result;							\
+     {									\
+       LOAD_ARGS_##nr (args)						\
+       if (__builtin_constant_p(name))					\
+         asm volatile ("mov	x8, %1	// syscall " # name "\n"	\
+		       "svc	0"					\
+                       : "=r" (_x0)					\
+		       : "i" (name) ASM_ARGS_##nr			\
+		       : "x8", "memory");				\
+       else								\
+         {								\
+           register long _x8 asm ("x8") = (name);			\
+           asm volatile ("svc	0\n\t"					\
+		         : "=r" (_x0)					\
+		         : "r"(_x8) ASM_ARGS_##nr			\
+		         : "memory");					\
+         }								\
+       _sys_result = _x0;						\
+     }									\
      _sys_result; })
 
 # undef INTERNAL_SYSCALL
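
[Editorial note] For contrast with the hunk above, a similarly hedged sketch
of the non-constant fallback (the else branch), where the syscall number is
only known at run time and is bound to x8 with a register variable. Again a
hypothetical helper, AArch64/Linux only, shown for the zero-argument case:

static inline long
raw_syscall0 (long nr)
{
  register long x8 asm ("x8") = nr;
  register long x0 asm ("x0");
  asm volatile ("svc	0"
		: "=r" (x0)
		: "r" (x8)
		: "memory");
  return x0;
}

In this form the compiler decides where to materialize x8, so the load may
end up several instructions before the svc; only the constant branch can
guarantee the mov/svc adjacency that motivates the patch.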