
[3/4] cputlb: Byte swap memory transaction attribute

Message ID: 1563343691021.27469@bt.com
State: New
Series: Invert Endian bit in SPARCv9 MMU TTE

Commit Message

Tony Nguyen July 17, 2019, 6:08 a.m. UTC
Notice the new attribute, byte_swap, and force the transaction through the
memory slow path.

This is required by architectures that can invert the endianness of a
memory transaction, e.g. SPARC64 with its Invert Endian TTE bit.

Signed-off-by: Tony Nguyen <tony.nguyen@bt.com>
---
 accel/tcg/cputlb.c      | 10 +++++++++-
 include/exec/memattrs.h |  2 ++
 2 files changed, 11 insertions(+), 1 deletion(-)
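
For context on how the new attribute is meant to be consumed (a minimal
sketch, not part of this patch: the function install_inverted_entry and
its parameters are illustrative assumptions), a target MMU fill routine
would set byte_swap from a page-table bit before installing the TLB entry:

/*
 * Hypothetical caller: set the new byte_swap attribute from a
 * page-table bit, e.g. the SPARC64 TTE Invert Endian bit, then
 * install the entry.  Assumes QEMU's usual cputlb declarations
 * ("exec/exec-all.h", "exec/memattrs.h") are in scope.
 */
static void install_inverted_entry(CPUState *cs, target_ulong vaddr,
                                   hwaddr paddr, int prot, int mmu_idx,
                                   bool invert_endian)
{
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;

    if (invert_endian) {
        attrs.byte_swap = 1;   /* forces the slow path; see cputlb.c hunk */
    }
    tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx,
                            TARGET_PAGE_SIZE);
}

The final patch of this series is expected to set the bit from the real
SPARC64 TTE.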

Comments

Richard Henderson July 17, 2019, 2:29 p.m. UTC | #1
On 7/16/19 11:08 PM, tony.nguyen@bt.com wrote:
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index baa61719ad..11debb7dda 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -731,7 +731,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>                vaddr, paddr, prot, mmu_idx);
>  
>      address = vaddr_page;
> -    if (size < TARGET_PAGE_SIZE) {
> +    if (size < TARGET_PAGE_SIZE || attrs.byte_swap) {

I don't think you want to re-use TLB_RECHECK.  This operation requires the
slow-path, yes, but not another call into cpu->cc->tlb_fill.


r~
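
To illustrate the alternative Richard is pointing at (a sketch only, not
code from this thread: the flag name TLB_FORCE_SLOW and its bit value are
assumptions), a dedicated TLB flag would keep byte-swapped accesses on the
slow path without implying a refill through tlb_fill:

/*
 * Hypothetical dedicated flag, kept separate from TLB_RECHECK so
 * hitting it does not re-run the MMU translation.  Real TLB flags
 * live in the low bits below TARGET_PAGE_BITS; the exact bit here
 * is illustrative.
 */
#define TLB_FORCE_SLOW  (1 << (TARGET_PAGE_BITS - 5))

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        address |= TLB_RECHECK;        /* sub-page: re-check on access */
    }
    if (attrs.byte_swap) {
        address |= TLB_FORCE_SLOW;     /* slow path, but no refill */
    }

(Upstream QEMU later added a dedicated TLB_BSWAP flag in this spirit.)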

Patch

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index baa61719ad..11debb7dda 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -731,7 +731,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
               vaddr, paddr, prot, mmu_idx);
 
     address = vaddr_page;
-    if (size < TARGET_PAGE_SIZE) {
+    if (size < TARGET_PAGE_SIZE || attrs.byte_swap) {
         /*
          * Slow-path the TLB entries; we will repeat the MMU check and TLB
          * fill on every access.
@@ -891,6 +891,10 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
+    if (iotlbentry->attrs.byte_swap) {
+        op ^= MO_BSWAP;
+    }
+
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@@ -933,6 +937,10 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
+    if (iotlbentry->attrs.byte_swap) {
+        op ^= MO_BSWAP;
+    }
+
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
index d4a3477d71..a0644ebba1 100644
--- a/include/exec/memattrs.h
+++ b/include/exec/memattrs.h
@@ -37,6 +37,8 @@ typedef struct MemTxAttrs {
     unsigned int user:1;
     /* Requester ID (for MSI for example) */
     unsigned int requester_id:16;
+    /* SPARC64: TTE invert endianness */
+    unsigned int byte_swap:1;
     /*
      * The following are target-specific page-table bits.  These are not
      * related to actual memory transactions at all.  However, this structure
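
As an aside on the io_readx()/io_writex() hunks (a standalone
illustration, not part of the patch), the XOR works because MO_BE and
MO_LE differ in exactly the MO_BSWAP bit, so toggling it flips the
endianness of the operation regardless of host byte order:

/*
 * Illustration of the MO_BSWAP toggle; assumes the MemOp definitions
 * introduced earlier in this series are in scope.
 */
MemOp op = MO_BEUL;      /* 32-bit big-endian operation */
op ^= MO_BSWAP;          /* now MO_LEUL: 32-bit little-endian */
op ^= MO_BSWAP;          /* toggles back to MO_BEUL */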