[v4,40/45] linux-user/aarch64: Implement SME signal handling

Message ID 20220628042117.368549-41-richard.henderson@linaro.org
State New
Series target/arm: Scalable Matrix Extension

Commit Message

Richard Henderson June 28, 2022, 4:21 a.m. UTC
Set the SM bit in the SVE record on signal delivery, and create the ZA record.
Restore SM and ZA state according to the records present on return.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/aarch64/signal.c | 162 +++++++++++++++++++++++++++++++++---
 1 file changed, 151 insertions(+), 11 deletions(-)

Comments

Peter Maydell July 4, 2022, 1:05 p.m. UTC | #1
On Tue, 28 Jun 2022 at 05:53, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Set the SM bit in the SVE record on signal delivery, create the ZA record.
> Restore SM and ZA state according to the records present on return.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  linux-user/aarch64/signal.c | 162 +++++++++++++++++++++++++++++++++---
>  1 file changed, 151 insertions(+), 11 deletions(-)
>
> diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
> index 22d0b8b4ec..1ad125d3d9 100644
> --- a/linux-user/aarch64/signal.c
> +++ b/linux-user/aarch64/signal.c
> @@ -104,6 +104,22 @@ struct target_sve_context {
>
>  #define TARGET_SVE_SIG_FLAG_SM  1
>
> +#define TARGET_ZA_MAGIC        0x54366345
> +
> +struct target_za_context {
> +    struct target_aarch64_ctx head;
> +    uint16_t vl;
> +    uint16_t reserved[3];
> +    /* The actual ZA data immediately follows. */
> +};
> +
> +#define TARGET_ZA_SIG_REGS_OFFSET \
> +    QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
> +#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
> +    (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
> +#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
> +    TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
> +
>  struct target_rt_sigframe {
>      struct target_siginfo info;
>      struct target_ucontext uc;
> @@ -176,9 +192,9 @@ static void target_setup_end_record(struct target_aarch64_ctx *end)
>  }
>
>  static void target_setup_sve_record(struct target_sve_context *sve,
> -                                    CPUARMState *env, int vq, int size)
> +                                    CPUARMState *env, int size)
>  {
> -    int i, j;
> +    int i, j, vq = sme_vq(env);


Shouldn't this be sve_vq() ?
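
That is, presumably

    int i, j, vq = sve_vq(env);

which would also match the sve_size computation in target_setup_frame
later in the patch, which uses sve_vq(env).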

>
>      memset(sve, 0, sizeof(*sve));
>      __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
> @@ -207,6 +223,34 @@ static void target_setup_sve_record(struct target_sve_context *sve,
>      }
>  }
>
> +static void target_setup_za_record(struct target_za_context *za,
> +                                   CPUARMState *env, int size)
> +{
> +    int vq = sme_vq(env);
> +    int vl = vq * TARGET_SVE_VQ_BYTES;
> +    int i, j;
> +
> +    memset(za, 0, sizeof(*za));
> +    __put_user(TARGET_ZA_MAGIC, &za->head.magic);
> +    __put_user(size, &za->head.size);
> +    __put_user(vl, &za->vl);
> +
> +    if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
> +        return;
> +    }

I know we always set size in code we control, but it feels
a bit fragile to assume that if the size isn't the "for zero
data" case then it definitely has enough space to put all
the ZA data in it. Maybe we could assert() that the code
below isn't going to overrun size ?
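
Something like this minimal sketch, perhaps (the loop below writes
exactly TARGET_ZA_SIG_CONTEXT_SIZE(vq) bytes):

    assert(size >= TARGET_ZA_SIG_CONTEXT_SIZE(vq));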


> +
> +    /*
> +     * Note that ZA vectors are stored as a byte stream,
> +     * with each byte element at a subsequent address.
> +     */
> +    for (i = 0; i < vl; ++i) {
> +        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
> +        for (j = 0; j < vq * 2; ++j) {
> +            __put_user_e(env->zarray[i].d[j], z + j, le);
> +        }
> +    }
> +}
> +
>  static void target_restore_general_frame(CPUARMState *env,
>                                           struct target_rt_sigframe *sf)
>  {
> @@ -252,16 +296,28 @@ static void target_restore_fpsimd_record(CPUARMState *env,
>
>  static bool target_restore_sve_record(CPUARMState *env,
>                                        struct target_sve_context *sve,
> -                                      int size)
> +                                      int size, int *svcr)
>  {
> -    int i, j, vl, vq;
> +    int i, j, vl, vq, flags;
> +    bool sm;
>
> +    /* ??? Kernel tests SVE && (!sm || SME); suggest (sm ? SME : SVE). */

This is fixed upstream (hasn't made it into Linus' git tree yet
but will at some point):
https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git/commit/?id=df07443f477a

so we might as well follow suit.
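
As a sketch, following the kernel here would mean reading flags before
the feature test, along the lines of:

    __get_user(vl, &sve->vl);
    __get_user(flags, &sve->flags);

    sm = flags & TARGET_SVE_SIG_FLAG_SM;
    if (sm ? !cpu_isar_feature(aa64_sme, env_archcpu(env))
           : !cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        return false;
    }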

>      if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
>          return false;
>      }
>
>      __get_user(vl, &sve->vl);
> -    vq = sve_vq(env);
> +    __get_user(flags, &sve->flags);
> +
> +    sm = flags & TARGET_SVE_SIG_FLAG_SM;
> +    if (sm) {
> +        if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
> +            return false;
> +        }
> +        vq = sme_vq(env);
> +    } else {
> +        vq = sve_vq(env);
> +    }
>
>      /* Reject mismatched VL. */
>      if (vl != vq * TARGET_SVE_VQ_BYTES) {

Otherwise
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>

thanks
-- PMM

Patch

diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index 22d0b8b4ec..1ad125d3d9 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -104,6 +104,22 @@ struct target_sve_context {
 
 #define TARGET_SVE_SIG_FLAG_SM  1
 
+#define TARGET_ZA_MAGIC        0x54366345
+
+struct target_za_context {
+    struct target_aarch64_ctx head;
+    uint16_t vl;
+    uint16_t reserved[3];
+    /* The actual ZA data immediately follows. */
+};
+
+#define TARGET_ZA_SIG_REGS_OFFSET \
+    QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
+#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
+    (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
+#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
+    TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
+
 struct target_rt_sigframe {
     struct target_siginfo info;
     struct target_ucontext uc;
@@ -176,9 +192,9 @@ static void target_setup_end_record(struct target_aarch64_ctx *end)
 }
 
 static void target_setup_sve_record(struct target_sve_context *sve,
-                                    CPUARMState *env, int vq, int size)
+                                    CPUARMState *env, int size)
 {
-    int i, j;
+    int i, j, vq = sme_vq(env);
 
     memset(sve, 0, sizeof(*sve));
     __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
@@ -207,6 +223,34 @@ static void target_setup_sve_record(struct target_sve_context *sve,
     }
 }
 
+static void target_setup_za_record(struct target_za_context *za,
+                                   CPUARMState *env, int size)
+{
+    int vq = sme_vq(env);
+    int vl = vq * TARGET_SVE_VQ_BYTES;
+    int i, j;
+
+    memset(za, 0, sizeof(*za));
+    __put_user(TARGET_ZA_MAGIC, &za->head.magic);
+    __put_user(size, &za->head.size);
+    __put_user(vl, &za->vl);
+
+    if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+        return;
+    }
+
+    /*
+     * Note that ZA vectors are stored as a byte stream,
+     * with each byte element at a subsequent address.
+     */
+    for (i = 0; i < vl; ++i) {
+        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+        for (j = 0; j < vq * 2; ++j) {
+            __put_user_e(env->zarray[i].d[j], z + j, le);
+        }
+    }
+}
+
 static void target_restore_general_frame(CPUARMState *env,
                                          struct target_rt_sigframe *sf)
 {
@@ -252,16 +296,28 @@ static void target_restore_fpsimd_record(CPUARMState *env,
 
 static bool target_restore_sve_record(CPUARMState *env,
                                       struct target_sve_context *sve,
-                                      int size)
+                                      int size, int *svcr)
 {
-    int i, j, vl, vq;
+    int i, j, vl, vq, flags;
+    bool sm;
 
+    /* ??? Kernel tests SVE && (!sm || SME); suggest (sm ? SME : SVE). */
     if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
         return false;
     }
 
     __get_user(vl, &sve->vl);
-    vq = sve_vq(env);
+    __get_user(flags, &sve->flags);
+
+    sm = flags & TARGET_SVE_SIG_FLAG_SM;
+    if (sm) {
+        if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+            return false;
+        }
+        vq = sme_vq(env);
+    } else {
+        vq = sve_vq(env);
+    }
 
     /* Reject mismatched VL. */
     if (vl != vq * TARGET_SVE_VQ_BYTES) {
@@ -278,6 +334,8 @@ static bool target_restore_sve_record(CPUARMState *env,
         return false;
     }
 
+    *svcr = FIELD_DP64(*svcr, SVCR, SM, sm);
+
     /*
      * Note that SVE regs are stored as a byte stream, with each byte element
      * at a subsequent address.  This corresponds to a little-endian load
@@ -304,15 +362,57 @@ static bool target_restore_sve_record(CPUARMState *env,
     return true;
 }
 
+static bool target_restore_za_record(CPUARMState *env,
+                                     struct target_za_context *za,
+                                     int size, int *svcr)
+{
+    int i, j, vl, vq;
+
+    if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+        return false;
+    }
+
+    __get_user(vl, &za->vl);
+    vq = sme_vq(env);
+
+    /* Reject mismatched VL. */
+    if (vl != vq * TARGET_SVE_VQ_BYTES) {
+        return false;
+    }
+
+    /* Accept empty record -- used to clear PSTATE.ZA. */
+    if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+        return true;
+    }
+
+    /* Reject non-empty but incomplete record. */
+    if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) {
+        return false;
+    }
+
+    *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1);
+
+    for (i = 0; i < vl; ++i) {
+        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+        for (j = 0; j < vq * 2; ++j) {
+            __get_user_e(env->zarray[i].d[j], z + j, le);
+        }
+    }
+    return true;
+}
+
 static int target_restore_sigframe(CPUARMState *env,
                                    struct target_rt_sigframe *sf)
 {
     struct target_aarch64_ctx *ctx, *extra = NULL;
     struct target_fpsimd_context *fpsimd = NULL;
     struct target_sve_context *sve = NULL;
+    struct target_za_context *za = NULL;
     uint64_t extra_datap = 0;
     bool used_extra = false;
     int sve_size = 0;
+    int za_size = 0;
+    int svcr = 0;
 
     target_restore_general_frame(env, sf);
 
@@ -350,6 +450,14 @@ static int target_restore_sigframe(CPUARMState *env,
             sve_size = size;
             break;
 
+        case TARGET_ZA_MAGIC:
+            if (za || size < sizeof(struct target_za_context)) {
+                goto err;
+            }
+            za = (struct target_za_context *)ctx;
+            za_size = size;
+            break;
+
         case TARGET_EXTRA_MAGIC:
             if (extra || size != sizeof(struct target_extra_context)) {
                 goto err;
@@ -381,9 +489,16 @@ static int target_restore_sigframe(CPUARMState *env,
     }
 
     /* SVE data, if present, overwrites FPSIMD data.  */
-    if (sve && !target_restore_sve_record(env, sve, sve_size)) {
+    if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
         goto err;
     }
+    if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
+        goto err;
+    }
+    if (env->svcr != svcr) {
+        env->svcr = svcr;
+        arm_rebuild_hflags(env);
+    }
     unlock_user(extra, extra_datap, 0);
     return 0;
 
@@ -451,7 +566,8 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
         .total_size = offsetof(struct target_rt_sigframe,
                                uc.tuc_mcontext.__reserved),
     };
-    int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
+    int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
+    int sve_size = 0, za_size = 0;
     struct target_rt_sigframe *frame;
     struct target_rt_frame_record *fr;
     abi_ulong frame_addr, return_addr;
@@ -461,11 +577,20 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
                                       &layout);
 
     /* SVE state needs saving only if it exists.  */
-    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
-        vq = sve_vq(env);
-        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
+    if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
+        cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16);
         sve_ofs = alloc_sigframe_space(sve_size, &layout);
     }
+    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+        /* ZA state needs saving only if it is enabled.  */
+        if (FIELD_EX64(env->svcr, SVCR, ZA)) {
+            za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
+        } else {
+            za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0);
+        }
+        za_ofs = alloc_sigframe_space(za_size, &layout);
+    }
 
     if (layout.extra_ofs) {
         /* Reserve space for the extra end marker.  The standard end marker
@@ -512,7 +637,10 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
         target_setup_end_record((void *)frame + layout.extra_end_ofs);
     }
     if (sve_ofs) {
-        target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
+        target_setup_sve_record((void *)frame + sve_ofs, env, sve_size);
+    }
+    if (za_ofs) {
+        target_setup_za_record((void *)frame + za_ofs, env, za_size);
     }
 
     /* Set up the stack frame for unwinding.  */
@@ -536,6 +664,18 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
         env->btype = 2;
     }
 
+    /*
+     * Invoke the signal handler with both SM and ZA disabled.
+     * When clearing SM, ResetSVEState, per SMSTOP.
+     */
+    if (FIELD_EX64(env->svcr, SVCR, SM)) {
+        arm_reset_sve_state(env);
+    }
+    if (env->svcr) {
+        env->svcr = 0;
+        arm_rebuild_hflags(env);
+    }
+
     if (info) {
         tswap_siginfo(&frame->info, info);
         env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);