@@ -1109,3 +1109,9 @@ DEF_HELPER_4(vid_v_b, void, ptr, ptr, env, i32)
DEF_HELPER_4(vid_v_h, void, ptr, ptr, env, i32)
DEF_HELPER_4(vid_v_w, void, ptr, ptr, env, i32)
DEF_HELPER_4(vid_v_d, void, ptr, ptr, env, i32)
+
+/* Integer Scalar Move Instruction */
+DEF_HELPER_3(vmv_s_x_b, void, ptr, tl, env)
+DEF_HELPER_3(vmv_s_x_h, void, ptr, tl, env)
+DEF_HELPER_3(vmv_s_x_w, void, ptr, tl, env)
+DEF_HELPER_3(vmv_s_x_d, void, ptr, tl, env)
@@ -562,6 +562,8 @@ vmsof_m 010110 . ..... 00010 010 ..... 1010111 @r2_vm
viota_m 010110 . ..... 10000 010 ..... 1010111 @r2_vm
vid_v 010110 . 00000 10001 010 ..... 1010111 @r1_vm
vext_x_v 001100 1 ..... ..... 010 ..... 1010111 @r
+# Integer scalar move: vmv.s.x writes rs1 to element 0 of vd
+vmv_s_x 001101 1 00000 ..... 110 ..... 1010111 @r2
 
vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm
vsetvl 1000000 ..... ..... 111 ..... 1010111 @r
@@ -2438,3 +2438,31 @@ static bool trans_vext_x_v(DisasContext *s, arg_r *a)
    tcg_temp_free(dest);
    return true;
}
+
+/* Integer Scalar Move Instruction */
+typedef void gen_helper_vmv_s_x(TCGv_ptr, TCGv, TCGv_env);
+static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
+{
+    /* One helper per SEW: byte, half, word, double. */
+    static gen_helper_vmv_s_x * const fns[4] = {
+        gen_helper_vmv_s_x_b, gen_helper_vmv_s_x_h,
+        gen_helper_vmv_s_x_w, gen_helper_vmv_s_x_d
+    };
+    TCGv_ptr vd;
+    TCGv rs1;
+
+    if (!vext_check_isa_ill(s)) {
+        return false;
+    }
+
+    rs1 = tcg_temp_new();
+    vd = tcg_temp_new_ptr();
+    gen_get_gpr(rs1, a->rs1);
+    tcg_gen_addi_ptr(vd, cpu_env, vreg_ofs(s, a->rd));
+
+    fns[s->sew](vd, rs1, cpu_env);
+
+    tcg_temp_free(rs1);
+    tcg_temp_free_ptr(vd);
+    return true;
+}
@@ -4673,3 +4673,28 @@ GEN_VEXT_VID_V(vid_v_b, uint8_t, H1, clearb)
GEN_VEXT_VID_V(vid_v_h, uint16_t, H2, clearh)
GEN_VEXT_VID_V(vid_v_w, uint32_t, H4, clearl)
GEN_VEXT_VID_V(vid_v_d, uint64_t, H8, clearq)
+
+/*
+ *** Vector Permutation Instructions
+ */
+/* Integer Scalar Move Instruction */
+#define GEN_VEXT_VMV_S_X(NAME, ETYPE, H, CLEAR_FN)                   \
+void HELPER(NAME)(void *vd, target_ulong s1, CPURISCVState *env)     \
+{                                                                    \
+    /* vmv.s.x is a no-op (no tail zeroing either) when vl == 0 */   \
+    if (env->vl == 0) {                                              \
+        return;                                                      \
+    }                                                                \
+    /*                                                               \
+     * Write rs1 to element 0.  When SEW > XLEN (RV32 with SEW=64)   \
+     * the spec requires the scalar to be sign-extended to SEW       \
+     * bits, so convert through target_long before the ETYPE         \
+     * conversion; for SEW <= XLEN the low bits are taken as before. \
+     */                                                              \
+    *((ETYPE *)vd + H(0)) = (ETYPE)(target_long)s1;                  \
+    /* Zero the tail elements vd[1] ... vd[VLMAX - 1] */             \
+    CLEAR_FN(vd, 1, sizeof(ETYPE), env_archcpu(env)->cfg.vlen / 8);  \
+}
+GEN_VEXT_VMV_S_X(vmv_s_x_b, uint8_t, H1, clearb)
+GEN_VEXT_VMV_S_X(vmv_s_x_h, uint16_t, H2, clearh)
+GEN_VEXT_VMV_S_X(vmv_s_x_w, uint32_t, H4, clearl)
+GEN_VEXT_VMV_S_X(vmv_s_x_d, uint64_t, H8, clearq)