author    | Rusty Russell      | 2014-06-24 19:40:16 +0200
committer | Michael S. Tsirkin | 2014-06-29 18:39:42 +0200
commit    | cee3ca002855f87b78aaa7769efeb90321757e49 (patch)
tree      | bee9d959470b145cd53c5ae71f826e511b66ab15 /hw
parent    | virtio: memory accessors for endian-ambivalent targets (diff)
virtio: allow byte swapping for vring
Quoting the original text from Rusty: "This is based on a simpler patch by Anthony Liguori".
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
[ add VirtIODevice * argument to most helpers,
Greg Kurz <gkurz@linux.vnet.ibm.com> ]
Signed-off-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
Reviewed-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Reviewed-by: Alexander Graf <agraf@suse.de>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
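
For context: the virtio_lduw_phys()/virtio_stw_phys() style helpers this patch switches to come from the newly included "hw/virtio/virtio-access.h", introduced by the parent commit ("virtio: memory accessors for endian-ambivalent targets"). Below is a minimal sketch of that pattern, assuming a virtio_access_is_big_endian() predicate that reports the byte order the guest driver uses for the device; it is an illustration of the idea, not the verbatim header, and the 32-bit and 64-bit variants (virtio_ldl_phys(), virtio_ldq_phys(), ...) follow the same shape.

    /* Illustrative sketch only, using QEMU-internal types (VirtIODevice,
     * hwaddr) and the fixed-endian physical memory accessors
     * lduw_le_phys()/lduw_be_phys()/stw_le_phys()/stw_be_phys(). */
    #include "hw/virtio/virtio.h"        /* VirtIODevice */
    #include "exec/address-spaces.h"     /* address_space_memory */

    static inline uint16_t virtio_lduw_phys(VirtIODevice *vdev, hwaddr pa)
    {
        /* Load the 16-bit vring field in the device's byte order rather
         * than the host-native order a plain lduw_phys() would use. */
        if (virtio_access_is_big_endian(vdev)) {
            return lduw_be_phys(&address_space_memory, pa);
        }
        return lduw_le_phys(&address_space_memory, pa);
    }

    static inline void virtio_stw_phys(VirtIODevice *vdev, hwaddr pa,
                                       uint16_t value)
    {
        /* Store back in the same device byte order. */
        if (virtio_access_is_big_endian(vdev)) {
            stw_be_phys(&address_space_memory, pa, value);
        } else {
            stw_le_phys(&address_space_memory, pa, value);
        }
    }

Because every accessor needs the device in order to pick the byte order, the vring helpers in hw/virtio/virtio.c below gain a VirtIODevice * argument (the change credited to Greg Kurz in the bracketed note above).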
Diffstat (limited to 'hw')
-rw-r--r-- | hw/virtio/virtio.c | 89
1 file changed, 48 insertions, 41 deletions
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index a0676e06af..5c981801f3 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -20,6 +20,7 @@
 #include "qemu/atomic.h"
 #include "hw/virtio/virtio-bus.h"
 #include "migration/migration.h"
+#include "hw/virtio/virtio-access.h"
 
 /*
  * The alignment to use between consumer and producer parts of vring.
@@ -102,53 +103,56 @@ static void virtqueue_init(VirtQueue *vq)
                                  vq->vring.align);
 }
 
-static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
+static inline uint64_t vring_desc_addr(VirtIODevice *vdev, hwaddr desc_pa,
+                                       int i)
 {
     hwaddr pa;
     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
-    return ldq_phys(&address_space_memory, pa);
+    return virtio_ldq_phys(vdev, pa);
 }
 
-static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
+static inline uint32_t vring_desc_len(VirtIODevice *vdev, hwaddr desc_pa, int i)
 {
     hwaddr pa;
     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
-    return ldl_phys(&address_space_memory, pa);
+    return virtio_ldl_phys(vdev, pa);
 }
 
-static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i)
+static inline uint16_t vring_desc_flags(VirtIODevice *vdev, hwaddr desc_pa,
+                                        int i)
 {
     hwaddr pa;
     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
-    return lduw_phys(&address_space_memory, pa);
+    return virtio_lduw_phys(vdev, pa);
 }
 
-static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
+static inline uint16_t vring_desc_next(VirtIODevice *vdev, hwaddr desc_pa,
+                                       int i)
 {
     hwaddr pa;
     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
-    return lduw_phys(&address_space_memory, pa);
+    return virtio_lduw_phys(vdev, pa);
 }
 
 static inline uint16_t vring_avail_flags(VirtQueue *vq)
 {
     hwaddr pa;
     pa = vq->vring.avail + offsetof(VRingAvail, flags);
-    return lduw_phys(&address_space_memory, pa);
+    return virtio_lduw_phys(vq->vdev, pa);
 }
 
 static inline uint16_t vring_avail_idx(VirtQueue *vq)
 {
     hwaddr pa;
     pa = vq->vring.avail + offsetof(VRingAvail, idx);
-    return lduw_phys(&address_space_memory, pa);
+    return virtio_lduw_phys(vq->vdev, pa);
 }
 
 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
 {
     hwaddr pa;
     pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
-    return lduw_phys(&address_space_memory, pa);
+    return virtio_lduw_phys(vq->vdev, pa);
 }
 
 static inline uint16_t vring_used_event(VirtQueue *vq)
@@ -160,44 +164,44 @@ static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
 {
     hwaddr pa;
     pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
-    stl_phys(&address_space_memory, pa, val);
+    virtio_stl_phys(vq->vdev, pa, val);
 }
 
 static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
 {
     hwaddr pa;
     pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
-    stl_phys(&address_space_memory, pa, val);
+    virtio_stl_phys(vq->vdev, pa, val);
 }
 
 static uint16_t vring_used_idx(VirtQueue *vq)
 {
     hwaddr pa;
     pa = vq->vring.used + offsetof(VRingUsed, idx);
-    return lduw_phys(&address_space_memory, pa);
+    return virtio_lduw_phys(vq->vdev, pa);
 }
 
 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
 {
     hwaddr pa;
     pa = vq->vring.used + offsetof(VRingUsed, idx);
-    stw_phys(&address_space_memory, pa, val);
+    virtio_stw_phys(vq->vdev, pa, val);
 }
 
 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
 {
+    VirtIODevice *vdev = vq->vdev;
     hwaddr pa;
     pa = vq->vring.used + offsetof(VRingUsed, flags);
-    stw_phys(&address_space_memory,
-             pa, lduw_phys(&address_space_memory, pa) | mask);
+    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
 }
 
 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
 {
+    VirtIODevice *vdev = vq->vdev;
     hwaddr pa;
     pa = vq->vring.used + offsetof(VRingUsed, flags);
-    stw_phys(&address_space_memory,
-             pa, lduw_phys(&address_space_memory, pa) & ~mask);
+    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
 }
 
 static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
@@ -207,7 +211,7 @@ static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
         return;
     }
     pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
-    stw_phys(&address_space_memory, pa, val);
+    virtio_stw_phys(vq->vdev, pa, val);
 }
 
 void virtio_queue_set_notification(VirtQueue *vq, int enable)
@@ -324,17 +328,18 @@ static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
     return head;
 }
 
-static unsigned virtqueue_next_desc(hwaddr desc_pa,
+static unsigned virtqueue_next_desc(VirtIODevice *vdev, hwaddr desc_pa,
                                     unsigned int i, unsigned int max)
 {
     unsigned int next;
 
     /* If this descriptor says it doesn't chain, we're done. */
-    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
+    if (!(vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_NEXT)) {
         return max;
+    }
 
     /* Check they're not leading us off end of descriptors. */
-    next = vring_desc_next(desc_pa, i);
+    next = vring_desc_next(vdev, desc_pa, i);
     /* Make sure compiler knows to grab that: we don't want it changing! */
     smp_wmb();
 
@@ -357,6 +362,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
 
     total_bufs = in_total = out_total = 0;
     while (virtqueue_num_heads(vq, idx)) {
+        VirtIODevice *vdev = vq->vdev;
         unsigned int max, num_bufs, indirect = 0;
         hwaddr desc_pa;
         int i;
@@ -366,8 +372,8 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
         i = virtqueue_get_head(vq, idx++);
         desc_pa = vq->vring.desc;
 
-        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
-            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
+        if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
+            if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
                 error_report("Invalid size for indirect buffer table");
                 exit(1);
             }
@@ -380,8 +386,8 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
 
             /* loop over the indirect descriptor table */
             indirect = 1;
-            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
-            desc_pa = vring_desc_addr(desc_pa, i);
+            max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
+            desc_pa = vring_desc_addr(vdev, desc_pa, i);
             num_bufs = i = 0;
         }
 
@@ -392,15 +398,15 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                 exit(1);
             }
 
-            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
-                in_total += vring_desc_len(desc_pa, i);
+            if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
+                in_total += vring_desc_len(vdev, desc_pa, i);
             } else {
-                out_total += vring_desc_len(desc_pa, i);
+                out_total += vring_desc_len(vdev, desc_pa, i);
             }
             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                 goto done;
             }
-        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);
+        } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);
 
         if (!indirect)
             total_bufs = num_bufs;
@@ -451,6 +457,7 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
 {
     unsigned int i, head, max;
     hwaddr desc_pa = vq->vring.desc;
+    VirtIODevice *vdev = vq->vdev;
 
     if (!virtqueue_num_heads(vq, vq->last_avail_idx))
         return 0;
@@ -461,19 +468,19 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
     max = vq->vring.num;
 
     i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
-    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
+    if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
         vring_avail_event(vq, vring_avail_idx(vq));
     }
 
-    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
-        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
+    if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
+        if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
             error_report("Invalid size for indirect buffer table");
            exit(1);
        }
 
        /* loop over the indirect descriptor table */
-        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
-        desc_pa = vring_desc_addr(desc_pa, i);
+        max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
+        desc_pa = vring_desc_addr(vdev, desc_pa, i);
        i = 0;
    }
 
@@ -481,30 +488,30 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
    do {
        struct iovec *sg;
 
-        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
+        if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
-            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
+            elem->in_addr[elem->in_num] = vring_desc_addr(vdev, desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
-            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
+            elem->out_addr[elem->out_num] = vring_desc_addr(vdev, desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }
 
-        sg->iov_len = vring_desc_len(desc_pa, i);
+        sg->iov_len = vring_desc_len(vdev, desc_pa, i);
 
        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
-    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);
+    } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);
 
    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
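
As a worked illustration of what the byte swapping buys (hypothetical values, not part of the patch): if a big-endian guest writes avail->idx = 0x0102, the bytes 01 02 land in guest memory; a host-native 16-bit load on a little-endian host reads them back as 0x0201, while an endian-aware accessor recovers 0x0102 on any host. The standalone sketch below demonstrates that mismatch.

    /* Standalone demo of the cross-endian problem the virtio accessors solve.
     * Hypothetical values; run on a little-endian host to see the mismatch. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Endian-aware load: always interprets the bytes as big-endian,
     * the order a big-endian guest used when writing the field. */
    static uint16_t load_be16(const uint8_t *p)
    {
        return (uint16_t)((p[0] << 8) | p[1]);
    }

    int main(void)
    {
        const uint8_t guest_mem[2] = { 0x01, 0x02 };  /* guest wrote 0x0102 */
        uint16_t native;

        memcpy(&native, guest_mem, sizeof(native));   /* host-native load */
        printf("endian-aware load: 0x%04x\n", load_be16(guest_mem));
        printf("host-native load:  0x%04x\n", native);
        return 0;
    }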