@@ -600,6 +600,16 @@ typedef enum {
LP_EXPECTED = 1,
} cfi_elp;
+/*
+ * Landing pad label register (LPLR) field masks. The label is split
+ * into three slices: upper (8 bits), middle (8 bits), lower (9 bits).
+ */
+#define LPLR_UL (((1 << 8) - 1) << 17)   /* upper label: bits 24:17 */
+#define LPLR_ML (((1 << 8) - 1) << 9)    /* middle label: bits 16:9 */
+#define LPLR_LL ((1 << 9) - 1)           /* lower label: bits 8:0 */
+
+/* Which slice of the landing pad label an lp{s,c}* instruction targets */
+typedef enum {
+    FCFI_LPLL = 0,
+    FCFI_ML = 1,
+    FCFI_UL = 2,
+} cfi_label_inst;
+
/* hstatus CSR bits */
#define HSTATUS_VSBE 0x00000020
#define HSTATUS_GVA 0x00000040
@@ -97,6 +97,11 @@ DEF_HELPER_FLAGS_2(fcvt_h_l, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_h_lu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fclass_h, TCG_CALL_NO_RWG_SE, tl, env, i64)
+/* Forward CFI label checking */
+DEF_HELPER_2(cfi_jalr, void, env, int)
+DEF_HELPER_3(cfi_check_landing_pad, void, env, int, int)
+DEF_HELPER_3(cfi_set_landing_pad, void, env, int, int)
+
/* Special functions */
DEF_HELPER_2(csrr, tl, env, int)
DEF_HELPER_3(csrw, void, env, int, tl)
@@ -112,6 +117,8 @@ DEF_HELPER_1(tlb_flush, void, env)
/* Native Debug */
DEF_HELPER_1(itrigger_match, void, env)
#endif
+/* Helper for backward CFI (shadow stack) return-address mismatch check */
+DEF_HELPER_1(sschkra_mismatch, void, env)
/* Hypervisor functions */
#ifndef CONFIG_USER_ONLY
@@ -37,6 +37,8 @@
%imm_u 12:s20 !function=ex_shift_12
%imm_bs 30:2 !function=ex_shift_3
%imm_rnum 20:4
+%imm_cfi9 15:9
+%imm_cfi8 15:8
# Argument sets:
&empty
@@ -163,6 +165,33 @@ csrrwi ............ ..... 101 ..... 1110011 @csr
csrrsi ............ ..... 110 ..... 1110011 @csr
csrrci ............ ..... 111 ..... 1110011 @csr
+# zimops (unprivileged integer may-be operations) instructions with the
+# system opcode. These are a superset of the cfi encodings. zimops_r and
+# zimops_rr must be the last entries in the overlapping pattern groups
+# below so that they act as the final sink for unclaimed encodings. Any
+# new carved-out encoding must be placed above mop.r and mop.rr.
+
+# cfi instructions carved out of mop.r
+{
+ sspush 100000 0 11100 ..... 100 00000 1110011 %rs1
+ sspop 100000 0 11100 00000 100 ..... 1110011 %rd
+ ssprr 100000 0 11101 00000 100 ..... 1110011 %rd
+ zimops_r 1-00-- 0 111-- ----- 100 ..... 1110011 %rd
+}
+
+# cfi instructions carved out of mop.rr
+{
+ sschckra 100010 1 00001 00101 100 00000 1110011
+ ssamoswap 100000 1 ..... ..... 100 ..... 1110011 @r
+
+ lpsll 100000 1 0 ......... 100 00000 1110011 %imm_cfi9
+ lpcll 100000 1 1 ......... 100 00000 1110011 %imm_cfi9
+ lpsml 100001 1 0 0........ 100 00000 1110011 %imm_cfi8
+ lpcml 100001 1 0 1........ 100 00000 1110011 %imm_cfi8
+ lpsul 100010 1 1 0........ 100 00000 1110011 %imm_cfi8
+ lpcul 100010 1 1 1........ 100 00000 1110011 %imm_cfi8
+ zimops_rr 1-00-- 1 - --------- 100 ..... 1110011 %rd
+}
+
# *** RV64I Base Instruction Set (in addition to RV32I) ***
lwu ............ ..... 110 ..... 0000011 @i
ld ............ ..... 011 ..... 0000011 @i
@@ -66,6 +66,20 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
}
gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
+
+ if (ctx->cfg_ptr->ext_cfi) {
+ /*
+ * Rely on a helper to check the forward CFI enable for the
+ * current process mode. The alternatives would be (1) include
+ * "fcfi enabled" in the cflags or (2) maintain a "fcfi
+ * currently enabled" in cpu_env and emit TCG code to access
+ * and test it.
+ */
+ if (a->rd == xRA || a->rd == xT0 || (a->rs1 != xRA && a->rs1 != xT0)) {
+ gen_helper_cfi_jalr(cpu_env, tcg_constant_i32(LP_EXPECTED));
+ }
+ }
+
lookup_and_goto_ptr(ctx);
if (misaligned) {
new file mode 100644
@@ -0,0 +1,53 @@
+/*
+ * RISC-V translation routines for the Control-Flow Integrity Extension
+ *
+ * Copyright (c) 2022-2023 Rivos Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * mop.r sink: when zimops is implemented the instruction simply writes
+ * zero to rd; otherwise it raises an illegal-instruction exception.
+ */
+static bool trans_zimops_r(DisasContext *ctx, arg_zimops_r *a)
+{
+    /* zimops not implemented, raise illegal instruction & return true */
+    if (!ctx->cfg_ptr->ext_zimops) {
+        gen_exception_illegal(ctx);
+        return true;
+    }
+
+    /* zimops implemented: rd <- 0 */
+    gen_set_gpr(ctx, a->rd, tcg_constant_tl(0));
+    return true;
+}
+
+/*
+ * mop.rr sink: when zimops is implemented the instruction simply writes
+ * zero to rd; otherwise it raises an illegal-instruction exception.
+ */
+static bool trans_zimops_rr(DisasContext *ctx, arg_zimops_rr *a)
+{
+    /* zimops not implemented, raise illegal instruction & return true */
+    if (!ctx->cfg_ptr->ext_zimops) {
+        gen_exception_illegal(ctx);
+        return true;
+    }
+
+    /* zimops implemented: rd <- 0 */
+    gen_set_gpr(ctx, a->rd, tcg_constant_tl(0));
+    return true;
+}
new file mode 100644
@@ -0,0 +1,310 @@
+/*
+ * RISC-V translation routines for the Control-Flow Integrity Extension
+ *
+ * Copyright (c) 2022-2023 Rivos Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Return the MemOp for an XLEN-wide memory access in the current mode. */
+static MemOp mxl_memop(DisasContext *ctx)
+{
+    switch (get_xl(ctx)) {
+    case MXL_RV32:
+        return MO_TEUL;
+
+    case MXL_RV64:
+        return MO_TEUQ;
+
+    case MXL_RV128:
+        return MO_TEUO;
+
+    default:
+        g_assert_not_reached();
+    }
+}
+
+/*
+ * sspop: pop a saved link register value off the shadow stack into rd
+ * (x1 or x5 only) and advance the shadow stack pointer (CSR_SSP).
+ */
+static bool trans_sspop(DisasContext *ctx, arg_sspop *a)
+{
+    /* cfi only supported on 32bit and 64bit */
+    if (get_xl(ctx) != MXL_RV32 && get_xl(ctx) != MXL_RV64) {
+        return false;
+    }
+
+    /* back cfi not enabled, should go to trans_zimops. return false */
+    if (!ctx->bcfi_enabled) {
+        return false;
+    }
+
+    /* sspop can only load into x1 or x5. Everything else defaults to zimops */
+    if (a->rd != 1 && a->rd != 5) {
+        return false;
+    }
+
+    /*
+     * Read the shadow stack pointer from CSR_SSP, perform a checked
+     * shadow-stack load into rd, then advance the pointer by XLEN/8
+     * bytes and write it back.
+     */
+    TCGv addr = tcg_temp_new();
+    int xlen_bytes = (get_xl(ctx) == MXL_RV64) ? 8 : 4;
+    TCGv_i32 ssp_csr = tcg_constant_i32(CSR_SSP);
+    TCGv data = dest_gpr(ctx, a->rd);
+
+    gen_helper_csrr(addr, cpu_env, ssp_csr);
+    tcg_gen_qemu_ld_tl(data, addr, MMU_IDX_SS_ACCESS,
+                       mxl_memop(ctx) | MO_ALIGN);
+    tcg_gen_addi_tl(addr, addr, xlen_bytes);
+    gen_set_gpr(ctx, a->rd, data);
+    gen_helper_csrw(cpu_env, ssp_csr, addr);
+
+    return true;
+}
+
+/*
+ * sspush: push the link register (x1 or x5 only) onto the shadow stack
+ * and pre-decrement the shadow stack pointer (CSR_SSP).
+ */
+static bool trans_sspush(DisasContext *ctx, arg_sspush *a)
+{
+    /* cfi only supported on 32bit and 64bit */
+    if (get_xl(ctx) != MXL_RV32 && get_xl(ctx) != MXL_RV64) {
+        return false;
+    }
+
+    /* back cfi not enabled, should go to trans_zimops. return false */
+    if (!ctx->bcfi_enabled) {
+        return false;
+    }
+
+    /*
+     * sspush can only push from x1 or x5. Everything else defaults to zimops
+     */
+    if (a->rs1 != 1 && a->rs1 != 5) {
+        return false;
+    }
+
+    /*
+     * Read the shadow stack pointer from CSR_SSP, decrement it by
+     * XLEN/8 bytes, store rs1 with a checked shadow-stack access, and
+     * write the updated pointer back to CSR_SSP.
+     */
+    TCGv addr = tcg_temp_new();
+    int tmp = (get_xl(ctx) == MXL_RV64) ? -8 : -4;
+    TCGv_i32 ssp_csr = tcg_constant_i32(CSR_SSP);
+    TCGv data = get_gpr(ctx, a->rs1, EXT_NONE);
+    gen_helper_csrr(addr, cpu_env, ssp_csr);
+
+    /* Pre-decrement, then store at the new top of the shadow stack */
+    tcg_gen_addi_tl(addr, addr, tmp);
+    tcg_gen_qemu_st_tl(data, addr, MMU_IDX_SS_ACCESS,
+                       mxl_memop(ctx) | MO_ALIGN);
+
+    gen_helper_csrw(cpu_env, ssp_csr, addr);
+
+    return true;
+}
+
+/*
+ * sschckra: compare x1 (ra) against x5 (t0); the helper faults on
+ * mismatch. All register access happens in the helper.
+ */
+static bool trans_sschckra(DisasContext *ctx, arg_sschckra *a)
+{
+    /* cfi only supported on 32bit and 64bit */
+    if (get_xl(ctx) != MXL_RV32 && get_xl(ctx) != MXL_RV64) {
+        return false;
+    }
+
+    /* back cfi not enabled, should go to trans_zimops. return false */
+    if (!ctx->bcfi_enabled) {
+        return false;
+    }
+
+    gen_helper_sschkra_mismatch(cpu_env);
+
+    return true;
+}
+
+/* ssprr: read the shadow stack pointer (CSR_SSP) into rd. */
+static bool trans_ssprr(DisasContext *ctx, arg_ssprr *a)
+{
+    /* cfi only supported on 32bit and 64bit */
+    if (get_xl(ctx) != MXL_RV32 && get_xl(ctx) != MXL_RV64) {
+        return false;
+    }
+
+    /* back cfi not enabled, should go to trans_zimops. return false */
+    if (!ctx->bcfi_enabled) {
+        return false;
+    }
+
+    /*
+     * Use dest_gpr(), not get_gpr(): for rd == x0 get_gpr() may return
+     * the shared constant-zero value, which must never be used as a
+     * helper output destination.
+     */
+    TCGv dest = dest_gpr(ctx, a->rd);
+    TCGv_i32 ssp_csr = tcg_constant_i32(CSR_SSP);
+    gen_helper_csrr(dest, cpu_env, ssp_csr);
+    gen_set_gpr(ctx, a->rd, dest);
+
+    return true;
+}
+
+/*
+ * ssamoswap: atomic swap of rs2 with the shadow-stack memory word at
+ * rs1, result in rd. The access uses the shadow-stack MMU index so the
+ * page must be a shadow stack page.
+ */
+static bool trans_ssamoswap(DisasContext *ctx, arg_ssamoswap *a)
+{
+    /* cfi only supported on 32bit and 64bit */
+    if (get_xl(ctx) != MXL_RV32 && get_xl(ctx) != MXL_RV64) {
+        return false;
+    }
+
+    /* back cfi not enabled, should go to trans_zimops. return false */
+    if (!ctx->bcfi_enabled) {
+        return false;
+    }
+
+    /* If cfi is enabled then, then rd must be != 0 */
+
+    if (a->rd == 0) {
+        return false;
+    }
+
+    TCGv dest = dest_gpr(ctx, a->rd);
+    TCGv src1 = get_address(ctx, a->rs1, 0);
+    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+    /* XLEN-wide, naturally aligned, sign-extended on RV32 */
+    MemOp mop = (MO_ALIGN | ((get_xl(ctx) == MXL_RV32) ? MO_TESL : MO_TESQ));
+
+    tcg_gen_atomic_xchg_tl(dest, src1, src2, MMU_IDX_SS_ACCESS, mop);
+    gen_set_gpr(ctx, a->rd, dest);
+    return true;
+}
+
+/*
+ * lpcll: landing pad check, lower label. This is also the landing pad
+ * instruction itself: when it is the first instruction of a TB it
+ * satisfies an outstanding landing-pad expectation.
+ */
+static bool trans_lpcll(DisasContext *ctx, arg_lpcll *a)
+{
+    /* cfi only supported on 32bit and 64bit */
+    if (get_xl(ctx) != MXL_RV32 && get_xl(ctx) != MXL_RV64) {
+        return false;
+    }
+
+    /*
+     * If this is the first instruction of the TB, let the translator
+     * know the landing pad requirement was satisfied. No need to bother
+     * checking for CFI feature or enablement.
+     */
+    if (ctx->base.pc_next == ctx->base.pc_first) {
+        ctx->fcfi_lp_expected = false;
+        /* Landing pad PC must be 4 byte aligned */
+        if (ctx->fcfi_enabled && ((ctx->base.pc_next) & 0x3)) {
+            /*
+             * Misaligned: according to the spec we raise illegal
+             * instruction instead of a misaligned-fetch fault. Stop
+             * translation here rather than emitting dead TCG ops
+             * after the exception.
+             */
+            gen_exception_illegal(ctx);
+            return true;
+        }
+    }
+
+    /* forward cfi not enabled, should go to trans_zimops. return false */
+    if (!ctx->fcfi_enabled) {
+        return false;
+    }
+
+    gen_helper_cfi_check_landing_pad(cpu_env, tcg_constant_i32(a->imm_cfi9),
+                                     tcg_constant_i32(FCFI_LPLL));
+    return true;
+}
+
+/*
+ * lpcml: check the middle (8-bit) slice of the landing pad label; a
+ * mismatch faults in the helper.
+ */
+static bool trans_lpcml(DisasContext *ctx, arg_lpcml *a)
+{
+    /* cfi only supported on 32bit and 64bit */
+    if (get_xl(ctx) != MXL_RV32 && get_xl(ctx) != MXL_RV64) {
+        return false;
+    }
+
+    /* forward cfi not enabled, should go to trans_zimops. return false */
+    if (!ctx->fcfi_enabled) {
+        return false;
+    }
+
+    gen_helper_cfi_check_landing_pad(cpu_env, tcg_constant_i32(a->imm_cfi8),
+                                     tcg_constant_i32(FCFI_ML));
+    return true;
+}
+
+/*
+ * lpcul: check the upper (8-bit) slice of the landing pad label; a
+ * mismatch faults in the helper.
+ */
+static bool trans_lpcul(DisasContext *ctx, arg_lpcul *a)
+{
+    /* cfi only supported on 32bit and 64bit */
+    if (get_xl(ctx) != MXL_RV32 && get_xl(ctx) != MXL_RV64) {
+        return false;
+    }
+
+    /* forward cfi not enabled, should go to trans_zimops. return false */
+    if (!ctx->fcfi_enabled) {
+        return false;
+    }
+
+    gen_helper_cfi_check_landing_pad(cpu_env, tcg_constant_i32(a->imm_cfi8),
+                                     tcg_constant_i32(FCFI_UL));
+    return true;
+}
+
+/*
+ * lpsll: set the lower (9-bit) slice of the expected landing pad label
+ * in LPLR. The helper clears the other slices first.
+ */
+static bool trans_lpsll(DisasContext *ctx, arg_lpsll *a)
+{
+    /* cfi only supported on 32bit and 64bit */
+    if (get_xl(ctx) != MXL_RV32 && get_xl(ctx) != MXL_RV64) {
+        return false;
+    }
+
+    /* forward cfi not enabled, should go to trans_zimops. return false */
+    if (!ctx->fcfi_enabled) {
+        return false;
+    }
+
+    gen_helper_cfi_set_landing_pad(cpu_env, tcg_constant_i32(a->imm_cfi9),
+                                   tcg_constant_i32(FCFI_LPLL));
+
+    return true;
+}
+
+/*
+ * lpsml: set the middle (8-bit) slice of the expected landing pad
+ * label in LPLR.
+ */
+static bool trans_lpsml(DisasContext *ctx, arg_lpsml *a)
+{
+    /* cfi only supported on 32bit and 64bit */
+    if (get_xl(ctx) != MXL_RV32 && get_xl(ctx) != MXL_RV64) {
+        return false;
+    }
+
+    /* forward cfi not enabled, should go to trans_zimops. return false */
+    if (!ctx->fcfi_enabled) {
+        return false;
+    }
+
+    gen_helper_cfi_set_landing_pad(cpu_env, tcg_constant_i32(a->imm_cfi8),
+                                   tcg_constant_i32(FCFI_ML));
+
+    return true;
+}
+
+/*
+ * lpsul: set the upper (8-bit) slice of the expected landing pad
+ * label in LPLR.
+ */
+static bool trans_lpsul(DisasContext *ctx, arg_lpsul *a)
+{
+    /* cfi only supported on 32bit and 64bit */
+    if (get_xl(ctx) != MXL_RV32 && get_xl(ctx) != MXL_RV64) {
+        return false;
+    }
+
+    /* forward cfi not enabled, should go to trans_zimops. return false */
+    if (!ctx->fcfi_enabled) {
+        return false;
+    }
+
+    gen_helper_cfi_set_landing_pad(cpu_env, tcg_constant_i32(a->imm_cfi8),
+                                   tcg_constant_i32(FCFI_UL));
+
+    return true;
+}
@@ -123,6 +123,73 @@ target_ulong helper_csrrw_i128(CPURISCVState *env, int csr,
return int128_getlo(rv);
}
+/*
+ * Backward CFI check for sschckra: fault with illegal instruction when
+ * x1 (ra) and x5 (t0) do not hold the same value.
+ */
+void helper_sschkra_mismatch(CPURISCVState *env)
+{
+    if (env->gpr[xRA] != env->gpr[xT0]) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    }
+}
+
+/*
+ * Called on indirect jump/call: when forward CFI is enabled in the
+ * current mode, record that a landing pad is expected next (elp).
+ */
+void helper_cfi_jalr(CPURISCVState *env, int elp)
+{
+    /*
+     * The translation routine doesn't know if forward CFI is enabled
+     * in the current processor mode or not. It's not worth burning a
+     * cflags bit to encode this, or tracking the current-mode-fcfi
+     * enable in a dedicated member of 'env'. Just come out to a helper
+     * for jump/call on a core with CFI.
+     */
+    if (cpu_get_fcfien(env)) {
+        env->elp = elp;
+    }
+}
+
+/*
+ * Compare one slice of the expected landing pad label (LPLR) against
+ * the label encoded in an lpc* instruction; mismatch raises an
+ * illegal-instruction fault. Only the lower-label check clears the
+ * landing-pad expectation (elp), since lpcll is the landing pad itself.
+ */
+void helper_cfi_check_landing_pad(CPURISCVState *env, int lbl, int inst_type)
+{
+    if (cpu_get_fcfien(env)) {
+        switch (inst_type) {
+        case FCFI_LPLL:
+            /*
+             * Check for a lower label match. We already checked 4 byte
+             * alignment in tcg
+             */
+            if (lbl != get_field(env->lplr, LPLR_LL)) {
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+            }
+            env->elp = NO_LP_EXPECTED;
+            break;
+        case FCFI_ML:
+            if (lbl != get_field(env->lplr, LPLR_ML)) {
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+            }
+            break;
+        case FCFI_UL:
+            if (lbl != get_field(env->lplr, LPLR_UL)) {
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+            }
+            break;
+        default:
+            /* Translator only passes the three cfi_label_inst values */
+            g_assert_not_reached();
+        }
+    }
+}
+
+/*
+ * Store one slice of the expected landing pad label into LPLR for a
+ * following lpc* check. Setting the lower slice resets the whole
+ * register first.
+ */
+void helper_cfi_set_landing_pad(CPURISCVState *env, int lbl, int inst_type)
+{
+    if (cpu_get_fcfien(env)) {
+        switch (inst_type) {
+        case FCFI_LPLL:
+            /* setting lower label always clears up entire field */
+            env->lplr = 0;
+            env->lplr = set_field(env->lplr, LPLR_LL, lbl);
+            break;
+        case FCFI_ML:
+            env->lplr = set_field(env->lplr, LPLR_ML, lbl);
+            break;
+        case FCFI_UL:
+            env->lplr = set_field(env->lplr, LPLR_UL, lbl);
+            break;
+        }
+    }
+}
+
#ifndef CONFIG_USER_ONLY
target_ulong helper_sret(CPURISCVState *env)
@@ -1071,6 +1071,8 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
#include "insn_trans/trans_privileged.c.inc"
#include "insn_trans/trans_svinval.c.inc"
#include "insn_trans/trans_xventanacondops.c.inc"
+#include "insn_trans/trans_zisslpcfi.c.inc"
+#include "insn_trans/trans_zimops.c.inc"
/* Include the auto-generated decoder for 16 bit insn */
#include "decode-insn16.c.inc"