Patchwork [1/4] cpu-all: Add unaligned load/store helper functions

login
register
mail settings
Submitter Richard Henderson
Date Oct. 17, 2012, 4:17 a.m.
Message ID <1350447438-8603-2-git-send-email-rth@twiddle.net>
Download mbox | patch
Permalink /patch/191957/
State New
Headers show

Comments

Richard Henderson - Oct. 17, 2012, 4:17 a.m.
Signed-off-by: Richard Henderson <rth@twiddle.net>
---
 cpu-all.h | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)
Blue Swirl - Oct. 19, 2012, 4:52 p.m.
On Wed, Oct 17, 2012 at 4:17 AM, Richard Henderson <rth@twiddle.net> wrote:
> Signed-off-by: Richard Henderson <rth@twiddle.net>
> ---
>  cpu-all.h | 38 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 38 insertions(+)
>
> diff --git a/cpu-all.h b/cpu-all.h
> index 2b99682..2db4414 100644
> --- a/cpu-all.h
> +++ b/cpu-all.h
> @@ -113,6 +113,44 @@ static inline void tswap64s(uint64_t *s)
>  #define bswaptls(s) bswap64s(s)
>  #endif
>
> +/* Unaligned loads and stores.  */
> +
> +static inline uint16_t unaligned_r16(const void *ptr)

I think the names should match other loads and stores, so this should
be something like lduw_unaligned_raw().

> +{
> +    uint16_t ret;
> +    memcpy(&ret, ptr, sizeof(ret));

This also assumes native host byte order, which may not be so useful.
Perhaps there should actually be three versions: _be, _le, and one
without an infix for target byte order?

> +    return ret;
> +}
> +
> +static inline uint32_t unaligned_r32(const void *ptr)
> +{
> +    uint32_t ret;
> +    memcpy(&ret, ptr, sizeof(ret));
> +    return ret;
> +}
> +
> +static inline uint64_t unaligned_r64(const void *ptr)
> +{
> +    uint64_t ret;
> +    memcpy(&ret, ptr, sizeof(ret));
> +    return ret;
> +}
> +
> +static inline void unaligned_w16(void *ptr, uint16_t v)
> +{
> +    memcpy(ptr, &v, sizeof(v));
> +}
> +
> +static inline void unaligned_w32(void *ptr, uint32_t v)
> +{
> +    memcpy(ptr, &v, sizeof(v));
> +}
> +
> +static inline void unaligned_w64(void *ptr, uint64_t v)
> +{
> +    memcpy(ptr, &v, sizeof(v));
> +}
> +
>  /* CPU memory access without any memory or io remapping */
>
>  /*
> --
> 1.7.11.7
>
>

Patch

diff --git a/cpu-all.h b/cpu-all.h
index 2b99682..2db4414 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -113,6 +113,44 @@  static inline void tswap64s(uint64_t *s)
 #define bswaptls(s) bswap64s(s)
 #endif
 
+/* Unaligned loads and stores.  */
+
+static inline uint16_t unaligned_r16(const void *ptr)
+{
+    uint16_t ret;
+    memcpy(&ret, ptr, sizeof(ret));
+    return ret;
+}
+
+static inline uint32_t unaligned_r32(const void *ptr)
+{
+    uint32_t ret;
+    memcpy(&ret, ptr, sizeof(ret));
+    return ret;
+}
+
+static inline uint64_t unaligned_r64(const void *ptr)
+{
+    uint64_t ret;
+    memcpy(&ret, ptr, sizeof(ret));
+    return ret;
+}
+
+static inline void unaligned_w16(void *ptr, uint16_t v)
+{
+    memcpy(ptr, &v, sizeof(v));
+}
+
+static inline void unaligned_w32(void *ptr, uint32_t v)
+{
+    memcpy(ptr, &v, sizeof(v));
+}
+
+static inline void unaligned_w64(void *ptr, uint64_t v)
+{
+    memcpy(ptr, &v, sizeof(v));
+}
+
 /* CPU memory access without any memory or io remapping */
 
 /*