@@ -213,6 +213,9 @@ struct vring_virtqueue {
bool last_add_time_valid;
ktime_t last_add_time;
#endif
+
+ /* DMA mapping pre-handler for the virtio device driver */
+ struct virtqueue_pre_dma_ops *pre_dma_ops;
};
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
@@ -369,6 +372,19 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
return (dma_addr_t)sg_phys(sg);
}
+ /* Allow virtio drivers to perform a customized mapping operation, and
+ * fall back to the generic path if the pre-handler fails to handle the mapping.
+ */
+ if (vq->pre_dma_ops && vq->pre_dma_ops->map_page) {
+ dma_addr_t addr;
+
+ addr = vq->pre_dma_ops->map_page(vring_dma_dev(vq),
+ sg_page(sg), sg->offset, sg->length,
+ direction, 0);
+ if (addr)
+ return addr;
+ }
+
/*
* We can't use dma_map_sg, because we don't use scatterlists in
* the way it expects (we don't guarantee that the scatterlist
@@ -432,6 +448,15 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
+ if (vq->pre_dma_ops && vq->pre_dma_ops->unmap_page) {
+ if (vq->pre_dma_ops->unmap_page(vring_dma_dev(vq),
+ virtio64_to_cpu(vq->vq.vdev, desc->addr),
+ virtio32_to_cpu(vq->vq.vdev, desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE, 0))
+ return;
+ }
+
dma_unmap_page(vring_dma_dev(vq),
virtio64_to_cpu(vq->vq.vdev, desc->addr),
virtio32_to_cpu(vq->vq.vdev, desc->len),
@@ -456,14 +481,22 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
extra[i].len,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
- } else {
- dma_unmap_page(vring_dma_dev(vq),
- extra[i].addr,
- extra[i].len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ goto out;
+ } else if (vq->pre_dma_ops && vq->pre_dma_ops->unmap_page) {
+ if (vq->pre_dma_ops->unmap_page(vring_dma_dev(vq),
+ extra[i].addr,
+ extra[i].len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE, 0))
+ goto out;
}
+ dma_unmap_page(vring_dma_dev(vq),
+ extra[i].addr,
+ extra[i].len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+
out:
return extra[i].next;
}
@@ -1206,10 +1239,19 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
+ if (vq->pre_dma_ops && vq->pre_dma_ops->unmap_page) {
+ if (vq->pre_dma_ops->unmap_page(vring_dma_dev(vq),
+ extra->addr,
+ extra->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE, 0))
+ return;
+ }
dma_unmap_page(vring_dma_dev(vq),
extra->addr, extra->len,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
+
}
}
@@ -1223,6 +1265,15 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
flags = le16_to_cpu(desc->flags);
+ if (vq->pre_dma_ops && vq->pre_dma_ops->unmap_page) {
+ if (vq->pre_dma_ops->unmap_page(vring_dma_dev(vq),
+ le64_to_cpu(desc->addr),
+ le32_to_cpu(desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE, 0))
+ return;
+ }
+
dma_unmap_page(vring_dma_dev(vq),
le64_to_cpu(desc->addr),
le32_to_cpu(desc->len),
@@ -2052,6 +2103,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->packed_ring = true;
vq->dma_dev = dma_dev;
vq->use_dma_api = vring_use_dma_api(vdev);
+ vq->pre_dma_ops = NULL;
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2541,6 +2593,7 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
#endif
vq->dma_dev = dma_dev;
vq->use_dma_api = vring_use_dma_api(vdev);
+ vq->pre_dma_ops = NULL;
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2945,4 +2998,12 @@ const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
+/* A virtio device driver can register its own DMA map/unmap pre-handler. */
+void virtqueue_register_pre_dma_ops(struct virtqueue *vq,
+ struct virtqueue_pre_dma_ops *pre_dma_ops)
+{
+ to_vvq(vq)->pre_dma_ops = pre_dma_ops;
+}
+EXPORT_SYMBOL_GPL(virtqueue_register_pre_dma_ops);
+
MODULE_LICENSE("GPL");
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
+#include <linux/dma-map-ops.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -203,4 +204,21 @@ void unregister_virtio_driver(struct virtio_driver *drv);
#define module_virtio_driver(__virtio_driver) \
module_driver(__virtio_driver, register_virtio_driver, \
unregister_virtio_driver)
+/**
+ * struct virtqueue_pre_dma_ops - DMA pre-handler for a virtio device driver
+ * @map_page: map a single page for DMA, or return 0 to use the generic path
+ * @unmap_page: unmap a single page; return false to use the generic path
+ */
+struct virtqueue_pre_dma_ops {
+ dma_addr_t (*map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+ bool (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+};
+
+void virtqueue_register_pre_dma_ops(struct virtqueue *vq,
+ struct virtqueue_pre_dma_ops *pre_dma_ops);
+
#endif /* _LINUX_VIRTIO_H */
Currently, DMA operations on virtio devices' data buffers are encapsulated
within the underlying virtqueue implementation. A DMA map/unmap is performed
for each data buffer as it is attached to or detached from the virtqueue,
transparently and invisibly to the higher-level virtio device drivers. This
encapsulation makes it infeasible for device drivers to introduce mechanisms,
such as page pool, that require explicit control over DMA map/unmap.
Therefore, insert a pre-handler before the generic DMA map/unmap operations,
so that virtio device drivers get the opportunity to participate in them.

Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
---
 drivers/virtio/virtio_ring.c | 73 +++++++++++++++++++++++++++++++++---
 include/linux/virtio.h       | 18 +++++++++
 2 files changed, 85 insertions(+), 6 deletions(-)
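For illustration, here is a minimal sketch of a driver-side user of this hook,
not part of the patch itself. The foo_* names and the premapped-page
bookkeeping helpers are assumptions standing in for a real consumer (e.g. a
page-pool-backed receive path); only struct virtqueue_pre_dma_ops and
virtqueue_register_pre_dma_ops() come from the patch. As the hunks above show,
returning 0 from .map_page or false from .unmap_page makes virtio_ring fall
back to the generic dma_map_page()/dma_unmap_page() path.

/*
 * Hypothetical driver-side sketch (not part of this patch). The foo_*
 * helpers stand in for the driver's own premapped-page bookkeeping,
 * e.g. a page pool that keeps pages DMA-mapped across recycling.
 */
#include <linux/dma-mapping.h>
#include <linux/virtio.h>

/* Assumed helpers: return the premapped DMA address of @page (or
 * DMA_MAPPING_ERROR), and test whether @addr belongs to this driver.
 */
dma_addr_t foo_premapped_addr(struct page *page);
bool foo_owns_dma_addr(dma_addr_t addr);

static dma_addr_t foo_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	dma_addr_t addr = foo_premapped_addr(page);

	if (addr == DMA_MAPPING_ERROR)
		return 0;	/* not ours: fall back to dma_map_page() */

	return addr + offset;
}

static bool foo_unmap_page(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs)
{
	if (!foo_owns_dma_addr(dma_handle))
		return false;	/* fall back to dma_unmap_page() */

	/* Keep the long-lived mapping; only make the data CPU-visible. */
	dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	return true;
}

static struct virtqueue_pre_dma_ops foo_pre_dma_ops = {
	.map_page	= foo_map_page,
	.unmap_page	= foo_unmap_page,
};

/* Called from the driver's probe path once the virtqueue exists. */
static void foo_attach_pre_dma_ops(struct virtqueue *rx_vq)
{
	virtqueue_register_pre_dma_ops(rx_vq, &foo_pre_dma_ops);
}

With hooks of this shape, the driver keeps its long-lived mappings across
buffer recycling instead of mapping and unmapping on every attach/detach,
which is the property page pool relies on.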