
[v3] ARC: io.h: Implement reads{x}()/writes{x}()

Message ID 244eba764469e8493f2d0c85a6d965aad6cd8128.1543571088.git.joabreu@synopsys.com
State New
Series [v3] ARC: io.h: Implement reads{x}()/writes{x}()

Commit Message

Jose Abreu Nov. 30, 2018, 9:47 a.m. UTC
Some ARC CPUs do not support unaligned loads/stores. Currently, the generic
implementation of reads{b/w/l}()/writes{b/w/l}() is used on ARC. This can
lead to malfunction of some drivers, as the generic functions do a plain
dereference of a pointer that can be unaligned.

Let's use the {get/put}_unaligned() helpers instead of a plain pointer
dereference in order to fix this. The helpers allow getting and storing data
at an unaligned address while respecting the CPU's alignment requirements.
According to [1], the use of these helpers is costly in terms of performance,
so we add an initial check for an already aligned buffer so that the helpers
are only used when they are really needed.

[1] Documentation/unaligned-memory-access.txt

Changes from v2:
- Rework commit msg (Vineet)
- Check if count is 0 and return (Vineet)
- Use two different loops (Vineet)

Changes from v1:
- Check if buffer is already aligned (David)
- Remove 64 bit mention (Alexey)

Signed-off-by: Jose Abreu <joabreu@synopsys.com>
Tested-by: Vitor Soares <soares@synopsys.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Alexey Brodkin <abrodkin@synopsys.com>
Cc: Joao Pinto <jpinto@synopsys.com>
Cc: Vitor Soares <soares@synopsys.com>
Cc: David Laight <David.Laight@ACULAB.COM>
---
 arch/arc/include/asm/io.h | 67 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)
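
To make the approach described in the commit message concrete, here is a
hand-expanded sketch of what the __raw_readsx(32, l) instantiation in the
patch below boils down to. This expansion is for illustration only; the
authoritative definition is the macro in the diff.

static inline void __raw_readsl(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	/* Aligned if the buffer address is a multiple of the access size */
	bool is_aligned = ((unsigned long)buffer % 4) == 0;
	u32 *buf = buffer;

	if (!count)
		return;

	if (is_aligned) {
		/* Fast path: plain stores are safe on an aligned buffer */
		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	} else {
		/* Slow path: put_unaligned() avoids unaligned stores */
		do {
			u32 x = __raw_readl(addr);
			put_unaligned(x, buf++);
		} while (--count);
	}
}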

Comments

Vineet Gupta Nov. 30, 2018, 7:29 p.m. UTC | #1
On 11/30/18 1:47 AM, Jose Abreu wrote:
> [snip]

Applied to for-curr after some minor chg (added a comment about accessor)

Thx,
-Vineet

Patch

diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index c22b181e8206..6c89c31fbe15 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -12,6 +12,7 @@ 
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/page.h>
+#include <asm/unaligned.h>
 
 #ifdef CONFIG_ISA_ARCV2
 #include <asm/barrier.h>
@@ -94,6 +95,37 @@  static inline u32 __raw_readl(const volatile void __iomem *addr)
 	return w;
 }
 
+#define __raw_readsx(t,f) \
+static inline void __raw_reads##f(const volatile void __iomem *addr, \
+				  void *buffer, unsigned int count) \
+{ \
+	bool is_aligned = ((unsigned long)buffer % ((t) / 8)) == 0; \
+	u##t *buf = buffer; \
+\
+	if (!count) \
+		return; \
+\
+	/* Some ARC CPU's don't support unaligned accesses */ \
+	if (is_aligned) { \
+		do { \
+			u##t x = __raw_read##f(addr); \
+			*buf++ = x; \
+		} while (--count); \
+	} else { \
+		do { \
+			u##t x = __raw_read##f(addr); \
+			put_unaligned(x, buf++); \
+		} while (--count); \
+	} \
+}
+
+#define __raw_readsb __raw_readsb
+__raw_readsx(8, b)
+#define __raw_readsw __raw_readsw
+__raw_readsx(16, w)
+#define __raw_readsl __raw_readsl
+__raw_readsx(32, l)
+
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
 {
@@ -126,6 +158,35 @@  static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 
 }
 
+#define __raw_writesx(t,f) \
+static inline void __raw_writes##f(volatile void __iomem *addr, \
+				   const void *buffer, unsigned int count) \
+{ \
+	bool is_aligned = ((unsigned long)buffer % ((t) / 8)) == 0; \
+	const u##t *buf = buffer; \
+\
+	if (!count) \
+		return; \
+\
+	/* Some ARC CPU's don't support unaligned accesses */ \
+	if (is_aligned) { \
+		do { \
+			__raw_write##f(*buf++, addr); \
+		} while (--count); \
+	} else { \
+		do { \
+			__raw_write##f(get_unaligned(buf++), addr); \
+		} while (--count); \
+	} \
+}
+
+#define __raw_writesb __raw_writesb
+__raw_writesx(8, b)
+#define __raw_writesw __raw_writesw
+__raw_writesx(16, w)
+#define __raw_writesl __raw_writesl
+__raw_writesx(32, l)
+
 /*
  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
  * Based on ARM model for the typical use case
@@ -141,10 +202,16 @@  static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 #define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
 #define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readsb(p,d,l)		({ __raw_readsb(p,d,l); __iormb(); })
+#define readsw(p,d,l)		({ __raw_readsw(p,d,l); __iormb(); })
+#define readsl(p,d,l)		({ __raw_readsl(p,d,l); __iormb(); })
 
 #define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
 #define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
 #define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
+#define writesb(p,d,l)		({ __iowmb(); __raw_writesb(p,d,l); })
+#define writesw(p,d,l)		({ __iowmb(); __raw_writesw(p,d,l); })
+#define writesl(p,d,l)		({ __iowmb(); __raw_writesl(p,d,l); })
 
 /*
  * Relaxed API for drivers which can handle barrier ordering themselves
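
For context, a hypothetical driver-side usage sketch (not part of the patch):
with these accessors in place, a driver can drain or fill a device FIFO even
when the memory buffer is not naturally aligned, e.g. when it points into the
middle of a packet. The function names below are made up for illustration.

#include <linux/io.h>

/*
 * Hypothetical example: copy 'words' 32-bit words from a device FIFO into
 * 'dst', which may be an unaligned address (e.g. inside an skb). With this
 * patch, readsl() handles the unaligned destination on ARC instead of
 * faulting or silently corrupting data.
 */
static void example_drain_fifo(void __iomem *fifo, void *dst,
			       unsigned int words)
{
	readsl(fifo, dst, words);
}

/* The write direction works the same way via writesl(). */
static void example_fill_fifo(void __iomem *fifo, const void *src,
			      unsigned int words)
{
	writesl(fifo, src, words);
}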