
[35/65] target/riscv: Add narrowing fixed-point clip instructions for XTheadVector

Message ID 20240412073735.76413-36-eric.huang@linux.alibaba.com
State New
Series target/riscv: Support XTheadVector extension

Commit Message

Huang Tao April 12, 2024, 7:37 a.m. UTC
These instructions have the same function as their RVV1.0 counterparts; only the
general differences between XTheadVector and RVV1.0 apply. In addition, the
vnclip*/vnclipu* helpers in vector_helper.c are made non-static and declared in
vector_internals.h, and the NOP_SSS_*/NOP_UUU_* type macros are moved there, so
that xtheadvector_helper.c can reuse them.
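
For reference, vnclip/vnclipu take a 2*SEW-wide source element, shift it right
by a masked shift amount, round according to vxrm, and saturate the result into
SEW bits, setting vxsat on overflow. Below is a minimal standalone sketch of the
signed SEW=8 case, assuming round-to-nearest-up (rnu) rounding only; the function
name and the sat flag are illustrative, not QEMU's API:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Illustrative only: narrow a 16-bit value to 8 bits with
     * round-to-nearest-up rounding and signed saturation.
     * QEMU's vnclip8() additionally selects the rounding mode from vxrm
     * and records overflow in env->vxsat instead of a bool flag.
     */
    static int8_t narrow_clip_i16_to_i8(int16_t a, uint8_t shamt, bool *sat)
    {
        uint8_t shift = shamt & 0xf;   /* only log2(2 * SEW) shift bits are used */
        /* rnu: the rounding increment is the most significant bit shifted out */
        int16_t round = shift ? ((a >> (shift - 1)) & 1) : 0;
        int16_t res = (a >> shift) + round;

        if (res > INT8_MAX) {          /* clip into the narrower type and flag it */
            *sat = true;
            return INT8_MAX;
        } else if (res < INT8_MIN) {
            *sat = true;
            return INT8_MIN;
        }
        return (int8_t)res;
    }

For example, narrow_clip_i16_to_i8(300, 1, &sat) yields 127 with sat set, since
300 >> 1 = 150 does not fit in int8_t.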

Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com>
---
 target/riscv/helper.h                         | 13 +++++++++
 .../riscv/insn_trans/trans_xtheadvector.c.inc | 14 +++++----
 target/riscv/vector_helper.c                  | 26 ++++-------------
 target/riscv/vector_internals.h               | 14 +++++++++
 target/riscv/xtheadvector_helper.c            | 29 +++++++++++++++++++
 5 files changed, 70 insertions(+), 26 deletions(-)

Patch

diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 70d3f34a59..6254be771f 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1983,3 +1983,16 @@  DEF_HELPER_6(th_vssra_vx_b, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(th_vssra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(th_vssra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(th_vssra_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(th_vnclip_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vnclip_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vnclip_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vnclipu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vnclipu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vnclipu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vnclipu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vnclipu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vnclipu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vnclip_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vnclip_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vnclip_vx_w, void, ptr, ptr, tl, ptr, env, i32)
diff --git a/target/riscv/insn_trans/trans_xtheadvector.c.inc b/target/riscv/insn_trans/trans_xtheadvector.c.inc
index d1f523832b..108f3249d0 100644
--- a/target/riscv/insn_trans/trans_xtheadvector.c.inc
+++ b/target/riscv/insn_trans/trans_xtheadvector.c.inc
@@ -1738,18 +1738,20 @@  GEN_OPIVX_TRANS_TH(th_vssra_vx,  opivx_check_th)
 GEN_OPIVI_TRANS_TH(th_vssrl_vi, IMM_TRUNC_SEW, th_vssrl_vx, opivx_check_th)
 GEN_OPIVI_TRANS_TH(th_vssra_vi, IMM_TRUNC_SEW, th_vssra_vx, opivx_check_th)
 
+/* Vector Narrowing Fixed-Point Clip Instructions */
+GEN_OPIVV_NARROW_TRANS_TH(th_vnclipu_vv)
+GEN_OPIVV_NARROW_TRANS_TH(th_vnclip_vv)
+GEN_OPIVX_NARROW_TRANS_TH(th_vnclipu_vx)
+GEN_OPIVX_NARROW_TRANS_TH(th_vnclip_vx)
+GEN_OPIVI_NARROW_TRANS_TH(th_vnclipu_vi, IMM_ZX, th_vnclipu_vx)
+GEN_OPIVI_NARROW_TRANS_TH(th_vnclip_vi, IMM_ZX, th_vnclip_vx)
+
 #define TH_TRANS_STUB(NAME)                                \
 static bool trans_##NAME(DisasContext *s, arg_##NAME *a)   \
 {                                                          \
     return require_xtheadvector(s);                        \
 }
 
-TH_TRANS_STUB(th_vnclipu_vv)
-TH_TRANS_STUB(th_vnclipu_vx)
-TH_TRANS_STUB(th_vnclipu_vi)
-TH_TRANS_STUB(th_vnclip_vv)
-TH_TRANS_STUB(th_vnclip_vx)
-TH_TRANS_STUB(th_vnclip_vi)
 TH_TRANS_STUB(th_vfadd_vv)
 TH_TRANS_STUB(th_vfadd_vf)
 TH_TRANS_STUB(th_vfsub_vv)
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index be1f1bc8e2..262cb28824 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -646,14 +646,6 @@  GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
  * Vector Integer Arithmetic Instructions
  */
 
-/* (TD, T1, T2, TX1, TX2) */
-#define NOP_SSS_B int8_t, int8_t, int16_t, int8_t, int16_t
-#define NOP_SSS_H int16_t, int16_t, int32_t, int16_t, int32_t
-#define NOP_SSS_W int32_t, int32_t, int64_t, int32_t, int64_t
-#define NOP_UUU_B uint8_t, uint8_t, uint16_t, uint8_t, uint16_t
-#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t
-#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t
-
 #define DO_SUB(N, M) (N - M)
 #define DO_RSUB(N, M) (M - N)
 
@@ -2677,8 +2669,7 @@  GEN_VEXT_VX_RM(vssra_vx_w, 4)
 GEN_VEXT_VX_RM(vssra_vx_d, 8)
 
 /* Vector Narrowing Fixed-Point Clip Instructions */
-static inline int8_t
-vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b)
+int8_t vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b)
 {
     uint8_t round, shift = b & 0xf;
     int16_t res;
@@ -2696,8 +2687,7 @@  vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b)
     }
 }
 
-static inline int16_t
-vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b)
+int16_t vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b)
 {
     uint8_t round, shift = b & 0x1f;
     int32_t res;
@@ -2715,8 +2705,7 @@  vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b)
     }
 }
 
-static inline int32_t
-vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b)
+int32_t vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b)
 {
     uint8_t round, shift = b & 0x3f;
     int64_t res;
@@ -2748,8 +2737,7 @@  GEN_VEXT_VX_RM(vnclip_wx_b, 1)
 GEN_VEXT_VX_RM(vnclip_wx_h, 2)
 GEN_VEXT_VX_RM(vnclip_wx_w, 4)
 
-static inline uint8_t
-vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b)
+uint8_t vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b)
 {
     uint8_t round, shift = b & 0xf;
     uint16_t res;
@@ -2764,8 +2752,7 @@  vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b)
     }
 }
 
-static inline uint16_t
-vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b)
+uint16_t vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b)
 {
     uint8_t round, shift = b & 0x1f;
     uint32_t res;
@@ -2780,8 +2767,7 @@  vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b)
     }
 }
 
-static inline uint32_t
-vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b)
+uint32_t vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b)
 {
     uint8_t round, shift = b & 0x3f;
     uint64_t res;
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
index 02b5fd49f0..a42dc080ec 100644
--- a/target/riscv/vector_internals.h
+++ b/target/riscv/vector_internals.h
@@ -255,6 +255,12 @@  void HELPER(NAME)(void *vd, void *v0, target_ulong s1,    \
 #define WOP_SSU_B int16_t, int8_t, uint8_t, int16_t, uint16_t
 #define WOP_SSU_H int32_t, int16_t, uint16_t, int32_t, uint32_t
 #define WOP_SSU_W int64_t, int32_t, uint32_t, int64_t, uint64_t
+#define NOP_SSS_B int8_t, int8_t, int16_t, int8_t, int16_t
+#define NOP_SSS_H int16_t, int16_t, int32_t, int16_t, int32_t
+#define NOP_SSS_W int32_t, int32_t, int64_t, int32_t, int64_t
+#define NOP_UUU_B uint8_t, uint8_t, uint16_t, uint8_t, uint16_t
+#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t
+#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t
 
 /* share functions */
 static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
@@ -326,4 +332,12 @@  int16_t vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b);
 int32_t vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b);
 int64_t vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b);
 
+int8_t vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b);
+int16_t vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b);
+int32_t vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b);
+
+uint8_t vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b);
+uint16_t vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b);
+uint32_t vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b);
+
 #endif /* TARGET_RISCV_VECTOR_INTERNALS_H */
diff --git a/target/riscv/xtheadvector_helper.c b/target/riscv/xtheadvector_helper.c
index 8cd3fd028b..2e97a95392 100644
--- a/target/riscv/xtheadvector_helper.c
+++ b/target/riscv/xtheadvector_helper.c
@@ -2561,3 +2561,32 @@  GEN_TH_VX_RM(th_vssra_vx_b, 1, 1, clearb_th)
 GEN_TH_VX_RM(th_vssra_vx_h, 2, 2, clearh_th)
 GEN_TH_VX_RM(th_vssra_vx_w, 4, 4, clearl_th)
 GEN_TH_VX_RM(th_vssra_vx_d, 8, 8, clearq_th)
+
+/* Vector Narrowing Fixed-Point Clip Instructions */
+THCALL(TH_OPIVV2_RM, th_vnclip_vv_b, NOP_SSS_B, H1, H2, H1, vnclip8)
+THCALL(TH_OPIVV2_RM, th_vnclip_vv_h, NOP_SSS_H, H2, H4, H2, vnclip16)
+THCALL(TH_OPIVV2_RM, th_vnclip_vv_w, NOP_SSS_W, H4, H8, H4, vnclip32)
+GEN_TH_VV_RM(th_vnclip_vv_b, 1, 1, clearb_th)
+GEN_TH_VV_RM(th_vnclip_vv_h, 2, 2, clearh_th)
+GEN_TH_VV_RM(th_vnclip_vv_w, 4, 4, clearl_th)
+
+THCALL(TH_OPIVX2_RM, th_vnclip_vx_b, NOP_SSS_B, H1, H2, vnclip8)
+THCALL(TH_OPIVX2_RM, th_vnclip_vx_h, NOP_SSS_H, H2, H4, vnclip16)
+THCALL(TH_OPIVX2_RM, th_vnclip_vx_w, NOP_SSS_W, H4, H8, vnclip32)
+GEN_TH_VX_RM(th_vnclip_vx_b, 1, 1, clearb_th)
+GEN_TH_VX_RM(th_vnclip_vx_h, 2, 2, clearh_th)
+GEN_TH_VX_RM(th_vnclip_vx_w, 4, 4, clearl_th)
+
+THCALL(TH_OPIVV2_RM, th_vnclipu_vv_b, NOP_UUU_B, H1, H2, H1, vnclipu8)
+THCALL(TH_OPIVV2_RM, th_vnclipu_vv_h, NOP_UUU_H, H2, H4, H2, vnclipu16)
+THCALL(TH_OPIVV2_RM, th_vnclipu_vv_w, NOP_UUU_W, H4, H8, H4, vnclipu32)
+GEN_TH_VV_RM(th_vnclipu_vv_b, 1, 1, clearb_th)
+GEN_TH_VV_RM(th_vnclipu_vv_h, 2, 2, clearh_th)
+GEN_TH_VV_RM(th_vnclipu_vv_w, 4, 4, clearl_th)
+
+THCALL(TH_OPIVX2_RM, th_vnclipu_vx_b, NOP_UUU_B, H1, H2, vnclipu8)
+THCALL(TH_OPIVX2_RM, th_vnclipu_vx_h, NOP_UUU_H, H2, H4, vnclipu16)
+THCALL(TH_OPIVX2_RM, th_vnclipu_vx_w, NOP_UUU_W, H4, H8, vnclipu32)
+GEN_TH_VX_RM(th_vnclipu_vx_b, 1, 1, clearb_th)
+GEN_TH_VX_RM(th_vnclipu_vx_h, 2, 2, clearh_th)
+GEN_TH_VX_RM(th_vnclipu_vx_w, 4, 4, clearl_th)
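
All of the clip helpers above derive their rounding increment from vxrm before
performing the narrowing shift. As a reminder of the four RVV fixed-point
rounding modes (rnu, rne, rdn, rod), here is a hedged standalone sketch; the
function name rounding_increment is illustrative and mirrors, but is not, the
in-tree rounding helper:

    #include <stdint.h>

    /*
     * Rounding increment for a right shift of v by 'shift' bits, following
     * the vxrm encodings: 0 = rnu (round-to-nearest-up), 1 = rne
     * (round-to-nearest-even), 2 = rdn (truncate), 3 = rod (round-to-odd).
     * Illustrative sketch, not QEMU's in-tree helper.
     */
    static uint8_t rounding_increment(int vxrm, uint64_t v, uint8_t shift)
    {
        /* most significant bit shifted out, and the remaining shifted-out bits */
        uint64_t d = shift ? (v >> (shift - 1)) & 1 : 0;
        uint64_t rest = shift > 1 ? v & ((1ULL << (shift - 1)) - 1) : 0;

        switch (vxrm) {
        case 0: /* rnu */
            return d;
        case 1: /* rne */
            return d && (rest != 0 || ((v >> shift) & 1));
        case 2: /* rdn */
            return 0;
        case 3: /* rod */
            return !((v >> shift) & 1) && (d || rest);
        }
        return 0;
    }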