@@ -81,6 +81,7 @@ struct vring_desc_state_packed {
 	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
 	u16 num;			/* Descriptor list length. */
 	u16 last;			/* The last desc state in a list. */
+	u32 flags;			/* State flags. */
 };
 
 struct vring_desc_extra {
@@ -1272,7 +1273,8 @@ static u16 packed_last_used(u16 last_used_idx)
 }
 
 static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
-				     const struct vring_desc_extra *extra)
+				     const struct vring_desc_extra *extra,
+				     bool dma_map_internal)
 {
 	u16 flags;
 
@@ -1287,6 +1289,9 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
 			 (flags & VRING_DESC_F_WRITE) ?
 			 DMA_FROM_DEVICE : DMA_TO_DEVICE);
 	} else {
+		if (!dma_map_internal)
+			return;
+
 		dma_unmap_page(vring_dma_dev(vq),
 			       extra->addr, extra->len,
 			       (flags & VRING_DESC_F_WRITE) ?
@@ -1453,6 +1458,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 	unsigned int i, n, c, descs_used;
 	__le16 head_flags, flags;
 	u16 head, id, prev, curr;
+	bool dma_map_internal;
 	int err;
 
 	START_USE(vq);
@@ -1498,7 +1504,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 	id = vq->free_head;
 	BUG_ON(id == vq->packed.vring.num);
 
-	if (virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs)) {
+	dma_map_internal = !sgs[0]->dma_address;
+	if (dma_map_internal && virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs)) {
 		END_USE(vq);
 		return -EIO;
 	}
@@ -1552,6 +1559,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 	vq->packed.desc_state[id].data = data;
 	vq->packed.desc_state[id].indir_desc = ctx;
 	vq->packed.desc_state[id].last = prev;
+	vq->packed.desc_state[id].flags = dma_map_internal ? VRING_STATE_F_MAP_INTERNAL : 0;
 
 	/*
 	 * A driver MUST NOT make the first descriptor in the list
@@ -1623,8 +1631,10 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
 	struct vring_desc_state_packed *state = NULL;
 	struct vring_packed_desc *desc;
 	unsigned int i, curr;
+	bool dma_map_internal;
 
 	state = &vq->packed.desc_state[id];
+	dma_map_internal = !!(state->flags & VRING_STATE_F_MAP_INTERNAL);
 
 	/* Clear data ptr. */
 	state->data = NULL;
@@ -1637,7 +1647,8 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
 		curr = id;
 		for (i = 0; i < state->num; i++) {
 			vring_unmap_extra_packed(vq,
						 &vq->packed.desc_extra[curr]);
+						 &vq->packed.desc_extra[curr],
+						 dma_map_internal);
 			curr = vq->packed.desc_extra[curr].next;
 		}
 	}
The virtio core only supports virtual addresses: drivers pass virtual
addresses in, and the DMA mapping is done inside the virtio core. In
some scenarios (such as AF_XDP), the memory is allocated and DMA-mapped
in advance, so we need to support passing an already-mapped DMA address
to the virtio core.

Drivers can use sg->dma_address to pass the premapped DMA address to
the virtio core. If one sg->dma_address is used, then all sgs passed to
the virtio APIs must have dma_address set; otherwise, all of them must
be null.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/virtio/virtio_ring.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)
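For illustration, here is a minimal sketch of the driver-side contract
this patch enables. It is not part of the patch; the helper name
(submit_premapped) and the choice of "dev" as the device to map against
are assumptions for the example.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/*
 * Sketch: submit a buffer whose DMA mapping the driver performed
 * itself. Assumes "dev" is the device the virtqueue does DMA against.
 */
static int submit_premapped(struct virtqueue *vq, struct device *dev,
			    void *buf, size_t len, void *data)
{
	struct scatterlist sg;
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	sg_init_one(&sg, buf, len);
	sg_dma_address(&sg) = addr;	/* non-zero: sg is premapped */

	/*
	 * Because sg->dma_address is set, virtqueue_add_packed() takes
	 * dma_map_internal == false: the virtio core skips its own DMA
	 * mapping for this request, and detach_buf_packed() will later
	 * skip unmapping as well. The driver must unmap after use.
	 */
	return virtqueue_add_outbuf(vq, &sg, 1, data, GFP_ATOMIC);
}

The per-request VRING_STATE_F_MAP_INTERNAL flag saved in desc_state is
what lets detach_buf_packed() make that skip-unmap decision later,
independently of whatever subsequent requests did.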