@@ -331,12 +331,18 @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
/* Set if TLB entry must have MMU lookup repeated for every access */
#define TLB_RECHECK (1 << (TARGET_PAGE_BITS - 4))
+/* Set if TLB entry must take the slow path. */
+#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS - 5))
/* Use this mask to check interception with an alignment mask
* in a TCG backend.
*/
-#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
- | TLB_RECHECK)
+#define TLB_FLAGS_MASK \
+ (TLB_INVALID_MASK \
+ | TLB_NOTDIRTY \
+ | TLB_MMIO \
+ | TLB_RECHECK \
+ | TLB_FORCE_SLOW)
/**
* tlb_hit_page: return true if page aligned @addr is a hit against the
 * tlb_hit_page: return true if page aligned @addr is a hit against the

The fast path is taken when TLB_FLAGS_MASK is all zero. TLB_FORCE_SLOW is
simply a TLB_FLAGS_MASK bit to force the slow path; there are no other
side effects.

Signed-off-by: Tony Nguyen <tony.nguyen@bt.com>
---
 include/exec/cpu-all.h | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

--
1.8.3.1