
[v3,5/8] lib: sbi: Introduce the SBI debug triggers extension support

Message ID 20240109170020.1731282-6-hchauhan@ventanamicro.com
State Accepted
Series Introduce support for SBI Debug Trigger Extension

Commit Message

Himanshu Chauhan Jan. 9, 2024, 5 p.m. UTC
The RISC-V Debug specification includes the Sdtrig ISA
extension, which describes the Trigger Module. Triggers can
cause a breakpoint exception or a trace action without
executing a special instruction, and can be used to implement
hardware breakpoints and watchpoints for native debugging.

The SBI Debug Trigger extension (Draft v6) can be found at:
https://lists.riscv.org/g/tech-debug/topic/99825362#1302

This patch is an initial implementation of the SBI Debug
Trigger Extension (Draft v6) in OpenSBI.

The following features are supported:
 * mcontrol, mcontrol6 triggers
 * Breakpoint and trace actions

NOTE: Chained triggers are not supported

Signed-off-by: Himanshu Chauhan <hchauhan@ventanamicro.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
---
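Usage note (not part of the commit): the sketch below illustrates how
an S-mode client might drive the shared-memory layout defined in this
patch to install one trigger. The sbi_ecall() wrapper, the
SBI_EXT_DBTR_* names, their values and the argument layout are assumed
placeholders (the actual ecall glue is added elsewhere in this series),
and the shared memory is assumed to be identity mapped.

  /* Hypothetical S-mode sketch; names and IDs below are placeholders,
   * not definitions from this patch. */
  #define SBI_EXT_DBTR                  0x44425452 /* "DBTR" */
  #define SBI_EXT_DBTR_SETUP_SHMEM      1          /* assumed FID */
  #define SBI_EXT_DBTR_TRIGGER_INSTALL  3          /* assumed FID */

  /* Same layout as struct sbi_dbtr_shmem_entry in this patch. */
  struct sbi_dbtr_data_msg { unsigned long tstate, tdata1, tdata2, tdata3; };
  struct sbi_dbtr_id_msg   { unsigned long idx; };
  struct sbi_dbtr_shmem_entry {
          struct sbi_dbtr_data_msg data;
          struct sbi_dbtr_id_msg id;
  };

  /* ecall wrapper assumed to be provided by the supervisor OS */
  extern long sbi_ecall(int ext, int fid, unsigned long a0,
                        unsigned long a1, unsigned long a2);

  /* XLEN/8 alignment satisfies SBI_DBTR_SHMEM_ALIGN_MASK */
  static struct sbi_dbtr_shmem_entry shmem_entry
          __attribute__((aligned(__riscv_xlen / 8)));

  /* One-time, per-hart registration of the shared memory. */
  static long dbtr_shmem_setup(void)
  {
          return sbi_ecall(SBI_EXT_DBTR, SBI_EXT_DBTR_SETUP_SHMEM,
                           (unsigned long)&shmem_entry, 0, 0);
  }

  /* Install one trigger whose tdata1/tdata2 the caller prepared. */
  static long dbtr_install_one(unsigned long tdata1, unsigned long tdata2)
  {
          long rc;

          /* Describe the trigger; M and DMODE bits must stay clear. */
          shmem_entry.data.tdata1 = tdata1;
          shmem_entry.data.tdata2 = tdata2;
          shmem_entry.data.tdata3 = 0;

          /* trig_count = 1: install a single trigger from entry 0. */
          rc = sbi_ecall(SBI_EXT_DBTR, SBI_EXT_DBTR_TRIGGER_INSTALL, 1, 0, 0);
          if (rc)
                  return rc;

          /* On success the logical trigger index comes back in @id. */
          return (long)shmem_entry.id.idx;
  }
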
 include/sbi/sbi_dbtr.h | 128 +++++++
 lib/sbi/objects.mk     |   1 +
 lib/sbi/sbi_dbtr.c     | 739 +++++++++++++++++++++++++++++++++++++++++
 lib/sbi/sbi_init.c     |   9 +
 4 files changed, 877 insertions(+)
 create mode 100644 include/sbi/sbi_dbtr.h
 create mode 100644 lib/sbi/sbi_dbtr.c

Patch

diff --git a/include/sbi/sbi_dbtr.h b/include/sbi/sbi_dbtr.h
new file mode 100644
index 0000000..dc9749f
--- /dev/null
+++ b/include/sbi/sbi_dbtr.h
@@ -0,0 +1,128 @@ 
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Ventana Micro Systems, Inc.
+ *
+ * Authors:
+ *   Himanshu Chauhan <hchauhan@ventanamicro.com>
+ */
+
+#ifndef __SBI_DBTR_H__
+#define __SBI_DBTR_H__
+
+#include <sbi/riscv_dbtr.h>
+
+#include <sbi/sbi_hartmask.h>
+#include <sbi/sbi_scratch.h>
+#include <sbi/sbi_domain.h>
+#include <sbi/sbi_types.h>
+#include <sbi/sbi_byteorder.h>
+
+enum {
+	RV_DBTR_DECLARE_BIT(TS, MAPPED, 0), /* trigger mapped to hw trigger */
+	RV_DBTR_DECLARE_BIT(TS, U, 1),
+	RV_DBTR_DECLARE_BIT(TS, S, 2),
+	RV_DBTR_DECLARE_BIT(TS, VU, 3),
+	RV_DBTR_DECLARE_BIT(TS, VS, 4),
+	RV_DBTR_DECLARE_BIT(TS, HAVE_TRIG, 5), /* H/w dbtr details available */
+	RV_DBTR_DECLARE_BIT(TS, HW_IDX, 8), /* Hardware index of trigger */
+};
+
+enum {
+	RV_DBTR_DECLARE_BIT_MASK(TS, MAPPED, 1),
+	RV_DBTR_DECLARE_BIT_MASK(TS, U, 1),
+	RV_DBTR_DECLARE_BIT_MASK(TS, S, 1),
+	RV_DBTR_DECLARE_BIT_MASK(TS, VU, 1),
+	RV_DBTR_DECLARE_BIT_MASK(TS, VS, 1),
+	RV_DBTR_DECLARE_BIT_MASK(TS, HAVE_TRIG, 1),
+	RV_DBTR_DECLARE_BIT_MASK(TS, HW_IDX, (__riscv_xlen-9)),
+};
+
+#if __riscv_xlen == 64
+#define SBI_DBTR_SHMEM_INVALID_ADDR	0xFFFFFFFFFFFFFFFFUL
+#elif __riscv_xlen == 32
+#define SBI_DBTR_SHMEM_INVALID_ADDR	0xFFFFFFFFUL
+#else
+#error "Unexpected __riscv_xlen"
+#endif
+
+struct sbi_dbtr_shmem {
+	unsigned long phys_lo;
+	unsigned long phys_hi;
+};
+
+struct sbi_dbtr_trigger {
+	unsigned long index;
+	unsigned long type_mask;
+	unsigned long state;
+	unsigned long tdata1;
+	unsigned long tdata2;
+	unsigned long tdata3;
+};
+
+struct sbi_dbtr_data_msg {
+	unsigned long tstate;
+	unsigned long tdata1;
+	unsigned long tdata2;
+	unsigned long tdata3;
+};
+
+struct sbi_dbtr_id_msg {
+	unsigned long idx;
+};
+
+struct sbi_dbtr_hart_triggers_state {
+	struct sbi_dbtr_trigger triggers[RV_MAX_TRIGGERS];
+	struct sbi_dbtr_shmem shmem;
+	u32 total_trigs;
+	u32 available_trigs;
+	u32 hartid;
+	u32 probed;
+};
+
+#define TDATA1_GET_TYPE(_t1)					\
+	EXTRACT_FIELD(_t1, RV_DBTR_BIT_MASK(TDATA1, TYPE))
+
+/* Set the hardware index of trigger in logical trigger state */
+#define SET_TRIG_HW_INDEX(_state, _idx)				\
+	do {							\
+		_state &= ~RV_DBTR_BIT_MASK(TS, HW_IDX);	\
+		_state |= (((unsigned long)_idx			\
+			    << RV_DBTR_BIT(TS, HW_IDX))		\
+			   & RV_DBTR_BIT_MASK(TS, HW_IDX));	\
+	} while (0)
+
+/** SBI shared mem messages layout */
+struct sbi_dbtr_shmem_entry {
+	struct sbi_dbtr_data_msg data;
+	struct sbi_dbtr_id_msg id;
+};
+
+#define SBI_DBTR_SHMEM_ALIGN_MASK               ((__riscv_xlen / 8) - 1)
+
+/** Initialize debug triggers */
+int sbi_dbtr_init(struct sbi_scratch *scratch, bool coldboot);
+
+/** SBI DBTR extension functions */
+int sbi_dbtr_supported(void);
+int sbi_dbtr_setup_shmem(const struct sbi_domain *dom, unsigned long smode,
+			 unsigned long shmem_phys_lo,
+			 unsigned long shmem_phys_hi);
+int sbi_dbtr_num_trig(unsigned long trig_tdata1, unsigned long *out);
+int sbi_dbtr_read_trig(const struct sbi_domain *dom, unsigned long smode,
+		       unsigned long trig_idx_base, unsigned long trig_count);
+int sbi_dbtr_install_trig(const struct sbi_domain *dom, unsigned long smode,
+			  unsigned long trig_count, unsigned long *out);
+int sbi_dbtr_uninstall_trig(unsigned long trig_idx_base,
+			    unsigned long trig_idx_mask);
+int sbi_dbtr_enable_trig(unsigned long trig_idx_base,
+			 unsigned long trig_idx_mask);
+int sbi_dbtr_update_trig(const struct sbi_domain *dom,
+			 unsigned long smode,
+			 unsigned long trig_idx_base,
+			 unsigned long trig_idx_mask);
+int sbi_dbtr_disable_trig(unsigned long trig_idx_base,
+			  unsigned long trig_idx_mask);
+
+int sbi_dbtr_get_total_triggers(void);
+
+#endif
diff --git a/lib/sbi/objects.mk b/lib/sbi/objects.mk
index c699187..c7de150 100644
--- a/lib/sbi/objects.mk
+++ b/lib/sbi/objects.mk
@@ -70,6 +70,7 @@  libsbi-objs-y += sbi_irqchip.o
 libsbi-objs-y += sbi_misaligned_ldst.o
 libsbi-objs-y += sbi_platform.o
 libsbi-objs-y += sbi_pmu.o
+libsbi-objs-y += sbi_dbtr.o
 libsbi-objs-y += sbi_scratch.o
 libsbi-objs-y += sbi_string.o
 libsbi-objs-y += sbi_system.o
diff --git a/lib/sbi/sbi_dbtr.c b/lib/sbi/sbi_dbtr.c
new file mode 100644
index 0000000..60d9dfa
--- /dev/null
+++ b/lib/sbi/sbi_dbtr.c
@@ -0,0 +1,739 @@ 
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Ventana Micro Systems, Inc.
+ *
+ * Author(s):
+ *   Himanshu Chauhan <hchauhan@ventanamicro.com>
+ */
+
+#include <sbi/sbi_ecall_interface.h>
+#include <sbi/sbi_csr_detect.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_trap.h>
+#include <sbi/sbi_dbtr.h>
+#include <sbi/sbi_heap.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/riscv_asm.h>
+
+/** Offset of pointer to HART's debug triggers info in scratch space */
+static unsigned long hart_state_ptr_offset;
+
+#define dbtr_get_hart_state_ptr(__scratch)				\
+	sbi_scratch_read_type((__scratch), void *, hart_state_ptr_offset)
+
+#define dbtr_thishart_state_ptr()				\
+	dbtr_get_hart_state_ptr(sbi_scratch_thishart_ptr())
+
+#define dbtr_set_hart_state_ptr(__scratch, __hart_state)		\
+	sbi_scratch_write_type((__scratch), void *, hart_state_ptr_offset, \
+			       (__hart_state))
+
+#define INDEX_TO_TRIGGER(_index)					\
+	({								\
+		struct sbi_dbtr_trigger *__trg = NULL;			\
+		struct sbi_dbtr_hart_triggers_state *__hs = NULL;	\
+		__hs = dbtr_get_hart_state_ptr(sbi_scratch_thishart_ptr()); \
+		__trg = &__hs->triggers[_index];			\
+		(__trg);						\
+	})
+
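+/*
+ * Iterate over trigger entries laid out back-to-back in the shared
+ * memory. The loop index _idx stays visible to the loop body (used
+ * e.g. to report which entry failed validation).
+ */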
+#define for_each_trig_entry(_base, _max, _etype, _entry)		\
+	for (int _idx = 0; _entry = ((_etype *)_base + _idx),		\
+	     _idx < _max;						\
+	     _idx++, _entry = ((_etype *)_base + _idx))
+
+#if __riscv_xlen == 64
+#define DBTR_SHMEM_MAKE_PHYS(_p_hi, _p_lo) (((u64)(_p_hi) << 32) | (_p_lo))
+#elif __riscv_xlen == 32
+#define DBTR_SHMEM_MAKE_PHYS(_p_hi, _p_lo) (((u64)(_p_hi) << 32) | (_p_lo))
+#else
+#error "Undefined XLEN"
+#endif
+
+static inline int sbi_dbtr_shmem_disabled(void)
+{
+	struct sbi_dbtr_hart_triggers_state *hs = NULL;
+
+	hs = dbtr_get_hart_state_ptr(sbi_scratch_thishart_ptr());
+
+	if (!hs)
+		return 1;
+
+	return (hs->shmem.phys_lo == SBI_DBTR_SHMEM_INVALID_ADDR &&
+		hs->shmem.phys_hi == SBI_DBTR_SHMEM_INVALID_ADDR
+		? 1 : 0);
+}
+
+static inline void sbi_dbtr_disable_shmem(void)
+{
+	struct sbi_dbtr_hart_triggers_state *hs = NULL;
+
+	hs = dbtr_get_hart_state_ptr(sbi_scratch_thishart_ptr());
+
+	if (!hs)
+		return;
+
+	hs->shmem.phys_lo = SBI_DBTR_SHMEM_INVALID_ADDR;
+	hs->shmem.phys_hi = SBI_DBTR_SHMEM_INVALID_ADDR;
+}
+
+static inline void *hart_shmem_base(void)
+{
+	struct sbi_dbtr_shmem *shmem;
+	unsigned long phys_hi, phys_lo;
+	struct sbi_dbtr_hart_triggers_state *hs = NULL;
+
+	hs = dbtr_get_hart_state_ptr(sbi_scratch_thishart_ptr());
+
+	if (!hs)
+		return NULL;
+
+	shmem = &hs->shmem;
+
+	phys_hi = (shmem->phys_hi == SBI_DBTR_SHMEM_INVALID_ADDR
+		   ? 0 : shmem->phys_hi);
+	phys_lo = (shmem->phys_lo == SBI_DBTR_SHMEM_INVALID_ADDR
+		   ? 0 : shmem->phys_lo);
+
+	return ((void *)DBTR_SHMEM_MAKE_PHYS(phys_hi, phys_lo));
+}
+
+static void sbi_trigger_init(struct sbi_dbtr_trigger *trig,
+			     unsigned long type_mask, unsigned long idx)
+{
+	trig->type_mask = type_mask;
+	trig->state = 0;
+	trig->tdata1 = 0;
+	trig->tdata2 = 0;
+	trig->tdata3 = 0;
+	trig->index = idx;
+}
+
+static inline struct sbi_dbtr_trigger *sbi_alloc_trigger(void)
+{
+	int i;
+	struct sbi_dbtr_trigger *f_trig = NULL;
+	struct sbi_dbtr_hart_triggers_state *hart_state;
+
+	hart_state = dbtr_thishart_state_ptr();
+	if (!hart_state)
+		return NULL;
+
+	if (hart_state->available_trigs <= 0)
+		return NULL;
+
+	for (i = 0; i < hart_state->total_trigs; i++) {
+		f_trig = INDEX_TO_TRIGGER(i);
+		if (f_trig->state & RV_DBTR_BIT(TS, MAPPED))
+			continue;
+		hart_state->available_trigs--;
+		break;
+	}
+
+	if (i == hart_state->total_trigs)
+		return NULL;
+
+	__set_bit(RV_DBTR_BIT(TS, MAPPED), &f_trig->state);
+
+	return f_trig;
+}
+
+static inline void sbi_free_trigger(struct sbi_dbtr_trigger *trig)
+{
+	struct sbi_dbtr_hart_triggers_state *hart_state;
+
+	if (trig == NULL)
+		return;
+
+	hart_state = dbtr_thishart_state_ptr();
+	if (!hart_state)
+		return;
+
+	trig->state = 0;
+	trig->tdata1 = 0;
+	trig->tdata2 = 0;
+	trig->tdata3 = 0;
+
+	hart_state->available_trigs++;
+}
+
+int sbi_dbtr_init(struct sbi_scratch *scratch, bool coldboot)
+{
+	struct sbi_trap_info trap = {0};
+	unsigned long tdata1;
+	unsigned long val;
+	int i;
+	struct sbi_dbtr_hart_triggers_state *hart_state = NULL;
+
+	if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SDTRIG))
+		return SBI_SUCCESS;
+
+	if (coldboot) {
+		hart_state_ptr_offset = sbi_scratch_alloc_type_offset(void *);
+		if (!hart_state_ptr_offset)
+			return SBI_ENOMEM;
+	}
+
+	hart_state = dbtr_get_hart_state_ptr(scratch);
+	if (!hart_state) {
+		hart_state = sbi_zalloc(sizeof(*hart_state));
+		if (!hart_state)
+			return SBI_ENOMEM;
+		hart_state->hartid = current_hartid();
+		dbtr_set_hart_state_ptr(scratch, hart_state);
+	}
+
+	/* disable the shared memory */
+	sbi_dbtr_disable_shmem();
+
+	/* Skip probing triggers if already probed */
+	if (hart_state->probed)
+		goto _probed;
+
+	for (i = 0; i < RV_MAX_TRIGGERS; i++) {
+		csr_write_allowed(CSR_TSELECT, (ulong)&trap, i);
+		if (trap.cause)
+			break;
+
+		val = csr_read_allowed(CSR_TSELECT, (ulong)&trap);
+		if (trap.cause)
+			break;
+
+		/*
+		 * Read back tselect and check that it contains the
+		 * written value
+		 */
+		if (val != i)
+			break;
+
+		val = csr_read_allowed(CSR_TINFO, (ulong)&trap);
+		if (trap.cause) {
+			/*
+			 * If reading tinfo caused an exception, the
+			 * debugger must read tdata1 to discover the
+			 * type.
+			 */
+			tdata1 = csr_read_allowed(CSR_TDATA1,
+						  (ulong)&trap);
+			if (trap.cause)
+				break;
+
+			if (TDATA1_GET_TYPE(tdata1) == 0)
+				break;
+
+			sbi_trigger_init(INDEX_TO_TRIGGER(i),
+					 BIT(TDATA1_GET_TYPE(tdata1)),
+					 i);
+			hart_state->total_trigs++;
+		} else {
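+			/*
+			 * A tinfo value of 1 means only trigger type 0
+			 * ("no trigger") is supported at this tselect,
+			 * so there is nothing further to probe.
+			 */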
+			if (val == 1)
+				break;
+
+			sbi_trigger_init(INDEX_TO_TRIGGER(i), val, i);
+			hart_state->total_trigs++;
+		}
+	}
+
+	hart_state->probed = 1;
+
+ _probed:
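+	/* All probed triggers are treated as available after (re-)init */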
+	hart_state->available_trigs = hart_state->total_trigs;
+
+	return SBI_SUCCESS;
+}
+
+int sbi_dbtr_get_total_triggers(void)
+{
+	struct sbi_dbtr_hart_triggers_state *hs;
+	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
+
+	/*
+	 * This function may be used during ecall registration.
+	 * By that time the debug trigger module might not be
+	 * initialized. If the extension is not supported, report
+	 * number of triggers as 0.
+	 */
+	if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SDTRIG))
+		return 0;
+
+	hs = dbtr_thishart_state_ptr();
+	if (!hs)
+		return 0;
+
+	return hs->total_trigs;
+}
+
+int sbi_dbtr_setup_shmem(const struct sbi_domain *dom, unsigned long smode,
+			 unsigned long shmem_phys_lo,
+			 unsigned long shmem_phys_hi)
+{
+	u32 hartid = current_hartid();
+	struct sbi_dbtr_hart_triggers_state *hart_state;
+
+	if (dom && !sbi_domain_is_assigned_hart(dom, hartid)) {
+		sbi_dprintf("%s: calling hart not assigned to this domain\n",
+			   __func__);
+		return SBI_ERR_DENIED;
+	}
+
+	/* call is to disable shared memory */
+	if (shmem_phys_lo == SBI_DBTR_SHMEM_INVALID_ADDR
+	    && shmem_phys_hi == SBI_DBTR_SHMEM_INVALID_ADDR) {
+		sbi_dbtr_disable_shmem();
+		return SBI_SUCCESS;
+	}
+
+	/* the shared memory must be disabled on this hart */
+	if (!sbi_dbtr_shmem_disabled())
+		return SBI_ERR_ALREADY_AVAILABLE;
+
+	/* lower physical address must be XLEN/8 bytes aligned */
+	if (shmem_phys_lo & SBI_DBTR_SHMEM_ALIGN_MASK)
+		return SBI_ERR_INVALID_PARAM;
+
+	if (dom && !sbi_domain_check_addr(dom, shmem_phys_lo, smode,
+					  SBI_DOMAIN_READ | SBI_DOMAIN_WRITE))
+		return SBI_ERR_INVALID_ADDRESS;
+
+	if (shmem_phys_hi != SBI_DBTR_SHMEM_INVALID_ADDR) {
+		if (dom &&
+		    !sbi_domain_check_addr(dom, shmem_phys_hi, smode,
+					   SBI_DOMAIN_READ | SBI_DOMAIN_WRITE))
+			return SBI_ERR_INVALID_ADDRESS;
+	}
+
+	hart_state = dbtr_thishart_state_ptr();
+	if (!hart_state)
+		return SBI_ERR_FAILED;
+
+	hart_state->shmem.phys_lo = shmem_phys_lo;
+	hart_state->shmem.phys_hi = shmem_phys_hi;
+
+	return SBI_SUCCESS;
+}
+
+static void dbtr_trigger_setup(struct sbi_dbtr_trigger *trig,
+			       struct sbi_dbtr_data_msg *recv)
+{
+	unsigned long tdata1;
+
+	if (!trig)
+		return;
+
+	trig->tdata1 = lle_to_cpu(recv->tdata1);
+	trig->tdata2 = lle_to_cpu(recv->tdata2);
+	trig->tdata3 = lle_to_cpu(recv->tdata3);
+
+	tdata1 = lle_to_cpu(recv->tdata1);
+
+	trig->state = 0;
+
+	__set_bit(RV_DBTR_BIT(TS, MAPPED), &trig->state);
+
+	SET_TRIG_HW_INDEX(trig->state, trig->index);
+
+	switch (TDATA1_GET_TYPE(tdata1)) {
+	case RISCV_DBTR_TRIG_MCONTROL:
+		if (__test_bit(RV_DBTR_BIT(MC, U), &tdata1))
+			__set_bit(RV_DBTR_BIT(TS, U), &trig->state);
+
+		if (__test_bit(RV_DBTR_BIT(MC, S), &tdata1))
+			__set_bit(RV_DBTR_BIT(TS, S), &trig->state);
+		break;
+	case RISCV_DBTR_TRIG_MCONTROL6:
+		if (__test_bit(RV_DBTR_BIT(MC6, U), &tdata1))
+			__set_bit(RV_DBTR_BIT(TS, U), &trig->state);
+
+		if (__test_bit(RV_DBTR_BIT(MC6, S), &tdata1))
+			__set_bit(RV_DBTR_BIT(TS, S), &trig->state);
+
+		if (__test_bit(RV_DBTR_BIT(MC6, VU), &tdata1))
+			__set_bit(RV_DBTR_BIT(TS, VU), &trig->state);
+
+		if (__test_bit(RV_DBTR_BIT(MC6, VS), &tdata1))
+			__set_bit(RV_DBTR_BIT(TS, VS), &trig->state);
+		break;
+	default:
+		sbi_dprintf("%s: Unknown type (tdata1: 0x%lx Type: %ld)\n",
+			    __func__, tdata1, TDATA1_GET_TYPE(tdata1));
+		break;
+	}
+}
+
+static inline void update_bit(unsigned long new, int nr, volatile unsigned long *addr)
+{
+	if (new)
+		__set_bit(nr, addr);
+	else
+		__clear_bit(nr, addr);
+}
+
+static void dbtr_trigger_enable(struct sbi_dbtr_trigger *trig)
+{
+	unsigned long state;
+	unsigned long tdata1;
+
+	if (!trig || !(trig->state & RV_DBTR_BIT_MASK(TS, MAPPED)))
+		return;
+
+	state = trig->state;
+	tdata1 = trig->tdata1;
+
+	switch (TDATA1_GET_TYPE(tdata1)) {
+	case RISCV_DBTR_TRIG_MCONTROL:
+		update_bit(state & RV_DBTR_BIT_MASK(TS, U),
+			   RV_DBTR_BIT(MC, U), &trig->tdata1);
+		update_bit(state & RV_DBTR_BIT_MASK(TS, S),
+			   RV_DBTR_BIT(MC, S), &trig->tdata1);
+		break;
+	case RISCV_DBTR_TRIG_MCONTROL6:
+		update_bit(state & RV_DBTR_BIT_MASK(TS, VU),
+			   RV_DBTR_BIT(MC6, VU), &trig->tdata1);
+		update_bit(state & RV_DBTR_BIT_MASK(TS, VS),
+			   RV_DBTR_BIT(MC6, VS), &trig->tdata1);
+		update_bit(state & RV_DBTR_BIT_MASK(TS, U),
+			   RV_DBTR_BIT(MC6, U), &trig->tdata1);
+		update_bit(state & RV_DBTR_BIT_MASK(TS, S),
+			   RV_DBTR_BIT(MC6, S), &trig->tdata1);
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * RISC-V Debug Support v1.0.0 section 5.5:
+	 * Debugger cannot simply set a trigger by writing tdata1, then tdata2,
+	 * etc. The current value of tdata2 might not be legal with the new
+	 * value of tdata1. To help with this situation, it is guaranteed that
+	 * writing 0 to tdata1 disables the trigger, and leaves it in a state
+	 * where tdata2 and tdata3 can be written with any value that makes
+	 * sense for any trigger type supported by this trigger.
+	 */
+	csr_write(CSR_TSELECT, trig->index);
+	csr_write(CSR_TDATA1, 0x0);
+	csr_write(CSR_TDATA2, trig->tdata2);
+	csr_write(CSR_TDATA1, trig->tdata1);
+}
+
+static void dbtr_trigger_disable(struct sbi_dbtr_trigger *trig)
+{
+	unsigned long tdata1;
+
+	if (!trig || !(trig->state & RV_DBTR_BIT_MASK(TS, MAPPED)))
+		return;
+
+	tdata1 = trig->tdata1;
+
+	switch (TDATA1_GET_TYPE(tdata1)) {
+	case RISCV_DBTR_TRIG_MCONTROL:
+		__clear_bit(RV_DBTR_BIT(MC, U), &trig->tdata1);
+		__clear_bit(RV_DBTR_BIT(MC, S), &trig->tdata1);
+		break;
+	case RISCV_DBTR_TRIG_MCONTROL6:
+		__clear_bit(RV_DBTR_BIT(MC6, VU), &trig->tdata1);
+		__clear_bit(RV_DBTR_BIT(MC6, VS), &trig->tdata1);
+		__clear_bit(RV_DBTR_BIT(MC6, U), &trig->tdata1);
+		__clear_bit(RV_DBTR_BIT(MC6, S), &trig->tdata1);
+		break;
+	default:
+		break;
+	}
+
+	csr_write(CSR_TSELECT, trig->index);
+	csr_write(CSR_TDATA1, trig->tdata1);
+}
+
+static void dbtr_trigger_clear(struct sbi_dbtr_trigger *trig)
+{
+	if (!trig || !(trig->state & RV_DBTR_BIT_MASK(TS, MAPPED)))
+		return;
+
+	csr_write(CSR_TSELECT, trig->index);
+	csr_write(CSR_TDATA1, 0x0);
+	csr_write(CSR_TDATA2, 0x0);
+}
+
+static int dbtr_trigger_supported(unsigned long type)
+{
+	switch (type) {
+	case RISCV_DBTR_TRIG_MCONTROL:
+	case RISCV_DBTR_TRIG_MCONTROL6:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int dbtr_trigger_valid(unsigned long type, unsigned long tdata)
+{
+	switch (type) {
+	case RISCV_DBTR_TRIG_MCONTROL:
+		if (!(tdata & RV_DBTR_BIT_MASK(MC, DMODE)) &&
+		    !(tdata & RV_DBTR_BIT_MASK(MC, M)))
+			return 1;
+		break;
+	case RISCV_DBTR_TRIG_MCONTROL6:
+		if (!(tdata & RV_DBTR_BIT_MASK(MC6, DMODE)) &&
+		    !(tdata & RV_DBTR_BIT_MASK(MC6, M)))
+			return 1;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+int sbi_dbtr_num_trig(unsigned long data, unsigned long *out)
+{
+	unsigned long type = TDATA1_GET_TYPE(data);
+	u32 hartid = current_hartid();
+	unsigned long total = 0;
+	struct sbi_dbtr_trigger *trig;
+	int i;
+	struct sbi_dbtr_hart_triggers_state *hs;
+
+	hs = dbtr_thishart_state_ptr();
+	if (!hs)
+		return SBI_ERR_FAILED;
+
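+	/* Zero tdata1 means "report the total number of triggers" */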
+	if (data == 0) {
+		*out = hs->total_trigs;
+		return SBI_SUCCESS;
+	}
+
+	for (i = 0; i < hs->total_trigs; i++) {
+		trig = INDEX_TO_TRIGGER(i);
+
+		if (__test_bit(type, &trig->type_mask))
+			total++;
+	}
+
+	sbi_dprintf("%s: hart%d: total triggers of type %lu: %lu\n",
+		    __func__, hartid, type, total);
+
+	*out = total;
+	return SBI_SUCCESS;
+}
+
+int sbi_dbtr_read_trig(const struct sbi_domain *dom, unsigned long smode,
+		       unsigned long trig_idx_base, unsigned long trig_count)
+{
+	struct sbi_dbtr_data_msg *xmit;
+	u32 hartid = current_hartid();
+	struct sbi_dbtr_trigger *trig;
+	struct sbi_dbtr_shmem_entry *entry;
+	void *shmem_base = NULL;
+	struct sbi_dbtr_hart_triggers_state *hs = NULL;
+
+	if (dom && !sbi_domain_is_assigned_hart(dom, hartid))
+		return SBI_ERR_DENIED;
+
+	hs = dbtr_thishart_state_ptr();
+	if (!hs)
+		return SBI_ERR_FAILED;
+
+	if (trig_idx_base >= hs->total_trigs ||
+	    trig_idx_base + trig_count > hs->total_trigs)
+		return SBI_ERR_INVALID_PARAM;
+
+	if (sbi_dbtr_shmem_disabled())
+		return SBI_ERR_NO_SHMEM;
+
+	shmem_base = hart_shmem_base();
+
+	for_each_trig_entry(shmem_base, trig_count, typeof(*entry), entry) {
+		sbi_hart_map_saddr((unsigned long)entry, sizeof(*entry));
+		xmit = &entry->data;
+		trig = INDEX_TO_TRIGGER((_idx + trig_idx_base));
+		xmit->tstate = cpu_to_lle(trig->state);
+		xmit->tdata1 = cpu_to_lle(trig->tdata1);
+		xmit->tdata2 = cpu_to_lle(trig->tdata2);
+		xmit->tdata3 = cpu_to_lle(trig->tdata3);
+		sbi_hart_unmap_saddr();
+	}
+
+	return SBI_SUCCESS;
+}
+
+int sbi_dbtr_install_trig(const struct sbi_domain *dom, unsigned long smode,
+			  unsigned long trig_count, unsigned long *out)
+{
+	u32 hartid = current_hartid();
+	void *shmem_base = NULL;
+	struct sbi_dbtr_shmem_entry *entry;
+	struct sbi_dbtr_data_msg *recv;
+	struct sbi_dbtr_id_msg *xmit;
+	unsigned long ctrl;
+	struct sbi_dbtr_trigger *trig;
+	struct sbi_dbtr_hart_triggers_state *hs = NULL;
+
+	if (dom && !sbi_domain_is_assigned_hart(dom, hartid))
+		return SBI_ERR_DENIED;
+
+	if (sbi_dbtr_shmem_disabled())
+		return SBI_ERR_NO_SHMEM;
+
+	shmem_base = hart_shmem_base();
+	hs = dbtr_thishart_state_ptr();
+	if (!hs)
+		return SBI_ERR_FAILED;
+
+	/* Check requested triggers configuration */
+	for_each_trig_entry(shmem_base, trig_count, typeof(*entry), entry) {
+		sbi_hart_map_saddr((unsigned long)entry, sizeof(*entry));
+		recv = (struct sbi_dbtr_data_msg *)(&entry->data);
+		ctrl = recv->tdata1;
+
+		if (!dbtr_trigger_supported(TDATA1_GET_TYPE(ctrl))) {
+			*out = _idx;
+			sbi_hart_unmap_saddr();
+			return SBI_ERR_FAILED;
+		}
+
+		if (!dbtr_trigger_valid(TDATA1_GET_TYPE(ctrl), ctrl)) {
+			*out = _idx;
+			sbi_hart_unmap_saddr();
+			return SBI_ERR_FAILED;
+		}
+		sbi_hart_unmap_saddr();
+	}
+
+	if (hs->available_trigs < trig_count) {
+		*out = hs->available_trigs;
+		return SBI_ERR_FAILED;
+	}
+
+	/* Install triggers */
+	for_each_trig_entry(shmem_base, trig_count, typeof(*entry), entry) {
+		/*
+		 * Since we have already checked if enough triggers are
+		 * available, trigger allocation must succeed.
+		 */
+		trig = sbi_alloc_trigger();
+
+		sbi_hart_map_saddr((unsigned long)entry, sizeof(*entry));
+
+		recv = (struct sbi_dbtr_data_msg *)(&entry->data);
+		xmit = (struct sbi_dbtr_id_msg *)(&entry->id);
+
+		dbtr_trigger_setup(trig, recv);
+		dbtr_trigger_enable(trig);
+		xmit->idx = cpu_to_lle(trig->index);
+		sbi_hart_unmap_saddr();
+	}
+
+	return SBI_SUCCESS;
+}
+
+int sbi_dbtr_uninstall_trig(unsigned long trig_idx_base,
+			    unsigned long trig_idx_mask)
+{
+	unsigned long trig_mask = trig_idx_mask << trig_idx_base;
+	unsigned long idx = trig_idx_base;
+	struct sbi_dbtr_trigger *trig;
+	struct sbi_dbtr_hart_triggers_state *hs;
+
+	hs = dbtr_thishart_state_ptr();
+	if (!hs)
+		return SBI_ERR_FAILED;
+
+	for_each_set_bit_from(idx, &trig_mask, hs->total_trigs) {
+		trig = INDEX_TO_TRIGGER(idx);
+		if (!(trig->state & RV_DBTR_BIT_MASK(TS, MAPPED)))
+			return SBI_ERR_INVALID_PARAM;
+
+		dbtr_trigger_clear(trig);
+
+		sbi_free_trigger(trig);
+	}
+
+	return SBI_SUCCESS;
+}
+
+int sbi_dbtr_enable_trig(unsigned long trig_idx_base,
+			 unsigned long trig_idx_mask)
+{
+	unsigned long trig_mask = trig_idx_mask << trig_idx_base;
+	unsigned long idx = trig_idx_base;
+	struct sbi_dbtr_trigger *trig;
+	struct sbi_dbtr_hart_triggers_state *hs;
+
+	hs = dbtr_thishart_state_ptr();
+	if (!hs)
+		return SBI_ERR_FAILED;
+
+	for_each_set_bit_from(idx, &trig_mask, hs->total_trigs) {
+		trig = INDEX_TO_TRIGGER(idx);
+		sbi_dprintf("%s: enable trigger %lu\n", __func__, idx);
+		dbtr_trigger_enable(trig);
+	}
+
+	return SBI_SUCCESS;
+}
+
+int sbi_dbtr_update_trig(const struct sbi_domain *dom,
+			 unsigned long smode,
+			 unsigned long trig_idx_base,
+			 unsigned long trig_idx_mask)
+{
+	unsigned long trig_mask = trig_idx_mask << trig_idx_base;
+	unsigned long idx = trig_idx_base;
+	u32 hartid = current_hartid();
+	struct sbi_dbtr_data_msg *recv;
+	unsigned long uidx = 0;
+	struct sbi_dbtr_trigger *trig;
+	struct sbi_dbtr_shmem_entry *entry;
+	void *shmem_base = NULL;
+	struct sbi_dbtr_hart_triggers_state *hs = NULL;
+
+	if (dom && !sbi_domain_is_assigned_hart(dom, hartid))
+		return SBI_ERR_DENIED;
+
+	if (sbi_dbtr_shmem_disabled())
+		return SBI_ERR_NO_SHMEM;
+
+	shmem_base = hart_shmem_base();
+	hs = dbtr_thishart_state_ptr();
+	if (!hs)
+		return SBI_ERR_FAILED;
+
+	for_each_set_bit_from(idx, &trig_mask, hs->total_trigs) {
+		trig = INDEX_TO_TRIGGER(idx);
+
+		if (!(trig->state & RV_DBTR_BIT_MASK(TS, MAPPED)))
+			return SBI_ERR_INVALID_PARAM;
+
+		entry = (shmem_base + uidx * sizeof(*entry));
+		recv = &entry->data;
+
+		trig->tdata2 = lle_to_cpu(recv->tdata2);
+		dbtr_trigger_enable(trig);
+		uidx++;
+	}
+
+	return SBI_SUCCESS;
+}
+
+int sbi_dbtr_disable_trig(unsigned long trig_idx_base,
+			  unsigned long trig_idx_mask)
+{
+	unsigned long trig_mask = trig_idx_mask << trig_idx_base;
+	unsigned long idx = trig_idx_base;
+	struct sbi_dbtr_trigger *trig;
+	struct sbi_dbtr_hart_triggers_state *hs;
+
+	hs = dbtr_thishart_state_ptr();
+	if (!hs)
+		return SBI_ERR_FAILED;
+
+	for_each_set_bit_from(idx, &trig_mask, hs->total_trigs) {
+		trig = INDEX_TO_TRIGGER(idx);
+		dbtr_trigger_disable(trig);
+	}
+
+	return SBI_SUCCESS;
+}
diff --git a/lib/sbi/sbi_init.c b/lib/sbi/sbi_init.c
index 6a98e13..0dcde27 100644
--- a/lib/sbi/sbi_init.c
+++ b/lib/sbi/sbi_init.c
@@ -23,6 +23,7 @@ 
 #include <sbi/sbi_irqchip.h>
 #include <sbi/sbi_platform.h>
 #include <sbi/sbi_pmu.h>
+#include <sbi/sbi_dbtr.h>
 #include <sbi/sbi_system.h>
 #include <sbi/sbi_string.h>
 #include <sbi/sbi_timer.h>
@@ -322,6 +323,10 @@  static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
 		sbi_hart_hang();
 	}
 
+	rc = sbi_dbtr_init(scratch, true);
+	if (rc)
+		sbi_hart_hang();
+
 	sbi_boot_print_banner(scratch);
 
 	rc = sbi_irqchip_init(scratch, true);
@@ -439,6 +444,10 @@  static void __noreturn init_warm_startup(struct sbi_scratch *scratch,
 	if (rc)
 		sbi_hart_hang();
 
+	rc = sbi_dbtr_init(scratch, false);
+	if (rc)
+		sbi_hart_hang();
+
 	rc = sbi_irqchip_init(scratch, false);
 	if (rc)
 		sbi_hart_hang();