@@ -942,6 +942,7 @@ static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
struct platform_device *pdev = vu_dev->pdev;
+ struct vq_transport_config tp_cfg = {};
struct virtio_uml_vq_info *info;
struct virtqueue *vq;
int num = MAX_SUPPORTED_QUEUE_SIZE;
@@ -955,10 +956,15 @@ static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
pdev->id, cfg->names[index]);
- vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
- cfg->ctx ? cfg->ctx[index] : false,
- vu_notify,
- cfg->callbacks[index], info->name);
+ tp_cfg.num = num;
+ tp_cfg.vring_align = PAGE_SIZE;
+ tp_cfg.weak_barriers = true;
+ tp_cfg.may_reduce_num = true;
+ tp_cfg.notify = vu_notify;
+
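+	/* Hand the locally formatted name back through cfg for the core to use. */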
+ cfg->names[index] = info->name;
+
+ vq = vring_create_virtqueue(vdev, index, &tp_cfg, cfg);
if (!vq) {
rc = -ENOMEM;
goto error_create;
@@ -540,6 +540,7 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
struct virtio_vq_config *cfg)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct vq_transport_config tp_cfg = {};
bool (*notify)(struct virtqueue *vq);
int err;
struct virtqueue *vq = NULL;
@@ -574,13 +575,14 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
goto out_err;
}
may_reduce = vcdev->revision > 0;
- vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
- vdev, true, may_reduce,
- cfg->ctx ? cfg->ctx[i] : false,
- notify,
- cfg->callbacks[i],
- cfg->names[i]);
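+	/* Legacy (revision 0) devices require the exact advertised queue size. */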
+ tp_cfg.num = info->num;
+ tp_cfg.vring_align = KVM_VIRTIO_CCW_RING_ALIGN;
+ tp_cfg.weak_barriers = true;
+ tp_cfg.may_reduce_num = may_reduce;
+ tp_cfg.notify = notify;
+
+ vq = vring_create_virtqueue(vdev, i, &tp_cfg, cfg);
if (!vq) {
/* For now, we fail if we can't get the requested size. */
dev_warn(&vcdev->cdev->dev, "no vq\n");
@@ -373,6 +373,7 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
struct virtio_vq_config *cfg)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ struct vq_transport_config tp_cfg = {};
bool (*notify)(struct virtqueue *vq);
struct virtio_mmio_vq_info *info;
struct virtqueue *vq;
@@ -408,13 +409,14 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
goto error_new_virtqueue;
}
+ tp_cfg.num = num;
+ tp_cfg.vring_align = VIRTIO_MMIO_VRING_ALIGN;
+ tp_cfg.weak_barriers = true;
+ tp_cfg.may_reduce_num = true;
+ tp_cfg.notify = notify;
+
/* Create the vring */
- vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
- true, true,
- cfg->ctx ? cfg->ctx[index] : false,
- notify,
- cfg->callbacks[index],
- cfg->names[index]);
+ vq = vring_create_virtqueue(vdev, index, &tp_cfg, cfg);
if (!vq) {
err = -ENOMEM;
goto error_new_virtqueue;
@@ -113,6 +113,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_vq_config *cfg,
u16 msix_vec)
{
+ struct vq_transport_config tp_cfg = {};
struct virtqueue *vq;
u16 num;
int err;
@@ -125,14 +126,14 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
info->msix_vector = msix_vec;
+ tp_cfg.num = num;
+ tp_cfg.vring_align = VIRTIO_PCI_VRING_ALIGN;
+ tp_cfg.weak_barriers = true;
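+	/* Legacy PCI devices fix the queue size; it cannot be reduced. */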
+ tp_cfg.may_reduce_num = false;
+ tp_cfg.notify = vp_notify;
+
/* create the vring */
- vq = vring_create_virtqueue(index, num,
- VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
- true, false,
- cfg->ctx ? cfg->ctx[index] : false,
- vp_notify,
- cfg->callbacks[index],
- cfg->names[index]);
+ vq = vring_create_virtqueue(&vp_dev->vdev, index, &tp_cfg, cfg);
if (!vq)
return ERR_PTR(-ENOMEM);
@@ -535,6 +535,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
{
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct vq_transport_config tp_cfg = {};
bool (*notify)(struct virtqueue *vq);
struct virtqueue *vq;
bool is_avq;
@@ -558,14 +559,14 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
info->msix_vector = msix_vec;
+ tp_cfg.num = num;
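+	/* Modern devices impose no legacy page alignment; cache-line alignment is enough. */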
+ tp_cfg.vring_align = SMP_CACHE_BYTES;
+ tp_cfg.weak_barriers = true;
+ tp_cfg.may_reduce_num = true;
+ tp_cfg.notify = notify;
+
/* create the vring */
- vq = vring_create_virtqueue(index, num,
- SMP_CACHE_BYTES, &vp_dev->vdev,
- true, true,
- cfg->ctx ? cfg->ctx[index] : false,
- notify,
- cfg->callbacks[index],
- cfg->names[index]);
+ vq = vring_create_virtqueue(&vp_dev->vdev, index, &tp_cfg, cfg);
if (!vq)
return ERR_PTR(-ENOMEM);
@@ -2662,43 +2662,32 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
return &vq->vq;
}
-struct virtqueue *vring_create_virtqueue(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+struct virtqueue *vring_create_virtqueue(struct virtio_device *vdev,
+ unsigned int index,
+ struct vq_transport_config *tp_cfg,
+ struct virtio_vq_config *cfg)
{
+ struct device *dma_dev;
+ unsigned int num;
+ unsigned int vring_align;
+ bool weak_barriers;
+ bool may_reduce_num;
+ bool context;
+	bool (*notify)(struct virtqueue *vq);
+	void (*callback)(struct virtqueue *vq);
+ const char *name;
- if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
- return vring_create_virtqueue_packed(index, num, vring_align,
- vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name, vdev->dev.parent);
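+	/* Prefer the transport-supplied per-vq DMA device; fall back to the parent. */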
+ dma_dev = tp_cfg->dma_dev ? : vdev->dev.parent;
- return vring_create_virtqueue_split(index, num, vring_align,
- vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name, vdev->dev.parent);
-}
-EXPORT_SYMBOL_GPL(vring_create_virtqueue);
+ num = tp_cfg->num;
+ vring_align = tp_cfg->vring_align;
+ weak_barriers = tp_cfg->weak_barriers;
+ may_reduce_num = tp_cfg->may_reduce_num;
+ notify = tp_cfg->notify;
-struct virtqueue *vring_create_virtqueue_dma(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name,
- struct device *dma_dev)
-{
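+	/* Per-virtqueue parameters come from the shared virtio_vq_config. */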
+ name = cfg->names[index];
+ callback = cfg->callbacks[index];
+ context = cfg->ctx ? cfg->ctx[index] : false;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return vring_create_virtqueue_packed(index, num, vring_align,
@@ -2709,7 +2698,7 @@ struct virtqueue *vring_create_virtqueue_dma(
vdev, weak_barriers, may_reduce_num,
context, notify, callback, name, dma_dev);
}
-EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
+EXPORT_SYMBOL_GPL(vring_create_virtqueue);
/**
* virtqueue_resize - resize the vring of vq
@@ -146,8 +146,8 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
{
struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- struct device *dma_dev;
const struct vdpa_config_ops *ops = vdpa->config;
+ struct vq_transport_config tp_cfg = {};
struct virtio_vdpa_vq_info *info;
bool (*notify)(struct virtqueue *vq) = virtio_vdpa_notify;
struct vdpa_callback cb;
@@ -198,16 +198,17 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
align = ops->get_vq_align(vdpa);
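+	/* vDPA parents may provide a per-virtqueue DMA device. */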
if (ops->get_vq_dma_dev)
- dma_dev = ops->get_vq_dma_dev(vdpa, index);
+ tp_cfg.dma_dev = ops->get_vq_dma_dev(vdpa, index);
else
- dma_dev = vdpa_get_dma_dev(vdpa);
- vq = vring_create_virtqueue_dma(index, max_num, align, vdev,
- true, may_reduce_num,
- cfg->ctx ? cfg->ctx[index] : false,
- notify,
- cfg->callbacks[index],
- cfg->names[index],
- dma_dev);
+ tp_cfg.dma_dev = vdpa_get_dma_dev(vdpa);
+
+ tp_cfg.num = max_num;
+ tp_cfg.vring_align = align;
+ tp_cfg.weak_barriers = true;
+ tp_cfg.may_reduce_num = may_reduce_num;
+ tp_cfg.notify = notify;
+
+ vq = vring_create_virtqueue(vdev, index, &tp_cfg, cfg);
if (!vq) {
err = -ENOMEM;
goto error_new_virtqueue;
@@ -5,6 +5,7 @@
#include <asm/barrier.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>
+#include <linux/virtio_config.h>
/*
* Barriers in virtio are tricky. Non-SMP virtio guests can't assume
@@ -60,38 +61,36 @@ struct virtio_device;
struct virtqueue;
struct device;
+/**
+ * struct vq_transport_config - Configuration for creating a new virtqueue (vq)
+ * @num: Number of descriptors in this virtqueue.
+ * @vring_align: Alignment size of this virtqueue's ring.
+ * @weak_barriers: True if weak (SMP) memory barriers within virtio_[rw]mb()
+ * suffice to order memory operations.
+ * @may_reduce_num: Indicates whether the number of descriptors can be reduced
+ * if vring allocation fails.
+ * @notify: Callback used to notify ("kick") the device when buffers have
+ * been added to the virtqueue.
+ * @dma_dev: DMA device associated with this virtqueue, used by the DMA API.
+ */
+struct vq_transport_config {
+ unsigned int num;
+ unsigned int vring_align;
+ bool weak_barriers;
+ bool may_reduce_num;
+ bool (*notify)(struct virtqueue *vq);
+ struct device *dma_dev;
+};
+
-/*
- * Creates a virtqueue and allocates the descriptor ring. If
- * may_reduce_num is set, then this may allocate a smaller ring than
- * expected. The caller should query virtqueue_get_vring_size to learn
- * the actual size of the ring.
- */
+/*
+ * Creates a virtqueue and allocates the descriptor ring. If
+ * tp_cfg->may_reduce_num is set, this may allocate a smaller ring than
+ * requested. The caller should query virtqueue_get_vring_size() to
+ * learn the actual size of the ring.
+ */
-struct virtqueue *vring_create_virtqueue(unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool ctx,
- bool (*notify)(struct virtqueue *vq),
- void (*callback)(struct virtqueue *vq),
- const char *name);
-
-/*
- * Creates a virtqueue and allocates the descriptor ring with per
- * virtqueue DMA device.
- */
-struct virtqueue *vring_create_virtqueue_dma(unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool ctx,
- bool (*notify)(struct virtqueue *vq),
- void (*callback)(struct virtqueue *vq),
- const char *name,
- struct device *dma_dev);
+struct virtqueue *vring_create_virtqueue(struct virtio_device *vdev,
+ unsigned int index,
+ struct vq_transport_config *tp_cfg,
+ struct virtio_vq_config *cfg);
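+/*
+ * Example (sketch, not from this patch): a transport fills the
+ * transport-specific knobs in vq_transport_config and passes the
+ * per-queue arrays through virtio_vq_config unchanged. my_notify is a
+ * hypothetical kick callback:
+ *
+ *	struct vq_transport_config tp_cfg = {
+ *		.num = 256,
+ *		.vring_align = SMP_CACHE_BYTES,
+ *		.weak_barriers = true,
+ *		.may_reduce_num = true,
+ *		.notify = my_notify,
+ *	};
+ *
+ *	vq = vring_create_virtqueue(vdev, index, &tp_cfg, cfg);
+ */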
/*
* Creates a virtqueue with a standard layout but a caller-allocated