From patchwork Thu Aug 8 05:15:42 2013
From: Rusty Russell <rusty@rustcorp.com.au>
To: qemu-devel@nongnu.org
Cc: Rusty Russell <rusty@rustcorp.com.au>
Date: Thu, 8 Aug 2013 14:45:42 +0930
Message-Id: <1375938949-22622-2-git-send-email-rusty@rustcorp.com.au>
In-Reply-To: <1375938949-22622-1-git-send-email-rusty@rustcorp.com.au>
References: <1375938949-22622-1-git-send-email-rusty@rustcorp.com.au>
X-Mailer: git-send-email 1.8.1.2
X-Patchwork-Submitter: Rusty Russell <rusty@rustcorp.com.au>
X-Patchwork-Id: 265653
Subject: [Qemu-devel] [PATCH 1/7] virtio: allow byte swapping for vring and config access

Virtio is currently defined to work as "guest endian", but this is a
problem if the guest can change endian.  As most targets can't change
endian, we make it a per-target option to avoid pessimising targets
that don't need it.

This is based on a simpler patch by Anthony Liguori, which only handled
the vring accesses.  We also need some drivers to be able to use these
helpers, e.g. for data which contains headers.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 hw/virtio/virtio.c                |  46 +++++++++----
 include/hw/virtio/virtio-access.h | 138 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 170 insertions(+), 14 deletions(-)
 create mode 100644 include/hw/virtio/virtio-access.h

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 8176c14..2887f17 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -18,6 +18,7 @@
 #include "hw/virtio/virtio.h"
 #include "qemu/atomic.h"
 #include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/virtio-access.h"
 
 /* The alignment to use between consumer and producer parts of vring.
  * x86 pagesize again. */
@@ -84,6 +85,20 @@ struct VirtQueue
     EventNotifier host_notifier;
 };
 
+#ifdef TARGET_VIRTIO_SWAPENDIAN
+bool virtio_byteswap;
+
+/* Ask target code if we should swap endian for all vring and config access. */
+static void mark_endian(void)
+{
+    virtio_byteswap = virtio_swap_endian();
+}
+#else
+static void mark_endian(void)
+{
+}
+#endif
+
 /* virt queue functions */
 static void virtqueue_init(VirtQueue *vq)
 {
@@ -100,49 +115,49 @@ static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
 {
     hwaddr pa;
     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
-    return ldq_phys(pa);
+    return virtio_ldq_phys(pa);
 }
 
 static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
 {
     hwaddr pa;
     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
-    return ldl_phys(pa);
+    return virtio_ldl_phys(pa);
 }
 
 static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i)
 {
     hwaddr pa;
     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
-    return lduw_phys(pa);
+    return virtio_lduw_phys(pa);
 }
 
 static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
 {
     hwaddr pa;
     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
-    return lduw_phys(pa);
+    return virtio_lduw_phys(pa);
 }
 
 static inline uint16_t vring_avail_flags(VirtQueue *vq)
 {
     hwaddr pa;
     pa = vq->vring.avail + offsetof(VRingAvail, flags);
-    return lduw_phys(pa);
+    return virtio_lduw_phys(pa);
 }
 
 static inline uint16_t vring_avail_idx(VirtQueue *vq)
 {
     hwaddr pa;
     pa = vq->vring.avail + offsetof(VRingAvail, idx);
-    return lduw_phys(pa);
+    return virtio_lduw_phys(pa);
 }
 
 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
 {
     hwaddr pa;
     pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
-    return lduw_phys(pa);
+    return virtio_lduw_phys(pa);
 }
 
 static inline uint16_t vring_used_event(VirtQueue *vq)
@@ -154,42 +169,42 @@ static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
 {
     hwaddr pa;
     pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
-    stl_phys(pa, val);
+    virtio_stl_phys(pa, val);
 }
 
 static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
 {
     hwaddr pa;
     pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
-    stl_phys(pa, val);
+    virtio_stl_phys(pa, val);
 }
 
 static uint16_t vring_used_idx(VirtQueue *vq)
 {
     hwaddr pa;
     pa = vq->vring.used + offsetof(VRingUsed, idx);
-    return lduw_phys(pa);
+    return virtio_lduw_phys(pa);
 }
 
 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
 {
     hwaddr pa;
     pa = vq->vring.used + offsetof(VRingUsed, idx);
-    stw_phys(pa, val);
+    virtio_stw_phys(pa, val);
 }
 
 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
 {
     hwaddr pa;
     pa = vq->vring.used + offsetof(VRingUsed, flags);
-    stw_phys(pa, lduw_phys(pa) | mask);
+    virtio_stw_phys(pa, virtio_lduw_phys(pa) | mask);
 }
 
 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
 {
     hwaddr pa;
     pa = vq->vring.used + offsetof(VRingUsed, flags);
-    stw_phys(pa, lduw_phys(pa) & ~mask);
+    virtio_stw_phys(pa, virtio_lduw_phys(pa) & ~mask);
 }
 
 static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
@@ -199,7 +214,7 @@ static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
         return;
     }
     pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
-    stw_phys(pa, val);
+    virtio_stw_phys(pa, val);
 }
 
 void virtio_queue_set_notification(VirtQueue *vq, int enable)
@@ -525,6 +540,9 @@ void virtio_set_status(VirtIODevice *vdev, uint8_t val)
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
     trace_virtio_set_status(vdev, val);
 
+    /* If guest virtio endian is uncertain, set it now. */
+    mark_endian();
+
     if (k->set_status) {
         k->set_status(vdev, val);
     }
diff --git a/include/hw/virtio/virtio-access.h b/include/hw/virtio/virtio-access.h
new file mode 100644
index 0000000..b1d531e
--- /dev/null
+++ b/include/hw/virtio/virtio-access.h
@@ -0,0 +1,138 @@
+/*
+ * Virtio Accessor Support: In case your target can change endian.
+ *
+ * Copyright IBM, Corp. 2013
+ *
+ * Authors:
+ *  Rusty Russell
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+#ifndef _QEMU_VIRTIO_ACCESS_H
+#define _QEMU_VIRTIO_ACCESS_H
+
+#ifdef TARGET_VIRTIO_SWAPENDIAN
+/* Architectures which need biendian define this function. */
+extern bool virtio_swap_endian(void);
+
+extern bool virtio_byteswap;
+#else
+#define virtio_byteswap false
+#endif
+
+static inline uint16_t virtio_lduw_phys(hwaddr pa)
+{
+    if (virtio_byteswap) {
+        return bswap16(lduw_phys(pa));
+    }
+    return lduw_phys(pa);
+
+}
+
+static inline uint32_t virtio_ldl_phys(hwaddr pa)
+{
+    if (virtio_byteswap) {
+        return bswap32(ldl_phys(pa));
+    }
+    return ldl_phys(pa);
+}
+
+static inline uint64_t virtio_ldq_phys(hwaddr pa)
+{
+    if (virtio_byteswap) {
+        return bswap64(ldq_phys(pa));
+    }
+    return ldq_phys(pa);
+}
+
+static inline void virtio_stw_phys(hwaddr pa, uint16_t value)
+{
+    if (virtio_byteswap) {
+        stw_phys(pa, bswap16(value));
+    } else {
+        stw_phys(pa, value);
+    }
+}
+
+static inline void virtio_stl_phys(hwaddr pa, uint32_t value)
+{
+    if (virtio_byteswap) {
+        stl_phys(pa, bswap32(value));
+    } else {
+        stl_phys(pa, value);
+    }
+}
+
+static inline void virtio_stw_p(void *ptr, uint16_t v)
+{
+    if (virtio_byteswap) {
+        stw_p(ptr, bswap16(v));
+    } else {
+        stw_p(ptr, v);
+    }
+}
+
+static inline void virtio_stl_p(void *ptr, uint32_t v)
+{
+    if (virtio_byteswap) {
+        stl_p(ptr, bswap32(v));
+    } else {
+        stl_p(ptr, v);
+    }
+}
+
+static inline void virtio_stq_p(void *ptr, uint64_t v)
+{
+    if (virtio_byteswap) {
+        stq_p(ptr, bswap64(v));
+    } else {
+        stq_p(ptr, v);
+    }
+}
+
+static inline int virtio_lduw_p(const void *ptr)
+{
+    if (virtio_byteswap) {
+        return bswap16(lduw_p(ptr));
+    } else {
+        return lduw_p(ptr);
+    }
+}
+
+static inline int virtio_ldl_p(const void *ptr)
+{
+    if (virtio_byteswap) {
+        return bswap32(ldl_p(ptr));
+    } else {
+        return ldl_p(ptr);
+    }
+}
+
+static inline uint64_t virtio_ldq_p(const void *ptr)
+{
+    if (virtio_byteswap) {
+        return bswap64(ldq_p(ptr));
+    } else {
+        return ldq_p(ptr);
+    }
+}
+
+static inline uint32_t virtio_tswap32(uint32_t s)
+{
+    if (virtio_byteswap) {
+        return bswap32(tswap32(s));
+    } else {
+        return tswap32(s);
+    }
+}
+
+static inline void virtio_tswap32s(uint32_t *s)
+{
+    tswap32s(s);
+    if (virtio_byteswap) {
+        *s = bswap32(*s);
+    }
+}
+#endif /* _QEMU_VIRTIO_ACCESS_H */
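
For illustration, here is a minimal sketch of how a biendian target might use this:
it would define TARGET_VIRTIO_SWAPENDIAN in its build configuration and provide the
virtio_swap_endian() hook declared in virtio-access.h, which mark_endian() samples
whenever the device status is written.  This sketch is not part of the patch; the
helper guest_cpu_is_big_endian() is an assumed, target-specific query of the guest's
current endianness (a real target would derive it from its CPU state), while
TARGET_WORDS_BIGENDIAN is the existing per-target build define.

/*
 * Hypothetical sketch only -- not part of this patch.  A biendian target
 * that defines TARGET_VIRTIO_SWAPENDIAN would supply the hook declared in
 * include/hw/virtio/virtio-access.h along these lines.
 */
#include "qemu-common.h"
#include "hw/virtio/virtio-access.h"

/* Assumed target-specific helper: is the guest currently running big-endian? */
extern bool guest_cpu_is_big_endian(void);

bool virtio_swap_endian(void)
{
#ifdef TARGET_WORDS_BIGENDIAN
    /* Device emulation was built big-endian: swap when the guest runs LE. */
    return !guest_cpu_is_big_endian();
#else
    /* Device emulation was built little-endian: swap when the guest runs BE. */
    return guest_cpu_is_big_endian();
#endif
}

Since virtio.c caches the answer in virtio_byteswap via mark_endian() at
set_status time, the query only needs to be cheap enough to run once per
status write, not on every vring access.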