@@ -196,9 +196,6 @@ struct receive_queue {
/* Record the last dma info to free after new pages is allocated. */
struct virtnet_rq_dma *last_dma;
-
- /* Do dma by self */
- bool do_dma;
};
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -638,7 +635,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
void *buf;
buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
- if (buf && rq->do_dma)
+ if (buf && rq->vq->premapped)
virtnet_rq_unmap(rq, buf, *len);
return buf;
@@ -651,7 +648,7 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
u32 offset;
void *head;
- if (!rq->do_dma) {
+ if (!rq->vq->premapped) {
sg_init_one(rq->sg, buf, len);
return;
}
@@ -681,7 +678,7 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
head = page_address(alloc_frag->page);
- if (rq->do_dma) {
+ if (rq->vq->premapped) {
dma = head;
/* new pages */
@@ -727,22 +724,6 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
return buf;
}
-static void virtnet_rq_set_premapped(struct virtnet_info *vi)
-{
- int i;
-
- /* disable for big mode */
- if (!vi->mergeable_rx_bufs && vi->big_packets)
- return;
-
- for (i = 0; i < vi->max_queue_pairs; i++) {
- if (virtqueue_set_dma_premapped(vi->rq[i].vq))
- continue;
-
- vi->rq[i].do_dma = true;
- }
-}
-
static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
{
struct virtnet_info *vi = vq->vdev->priv;
@@ -751,7 +732,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
rq = &vi->rq[i];
- if (rq->do_dma)
+ if (rq->vq->premapped)
virtnet_rq_unmap(rq, buf, 0);
virtnet_rq_free_buf(vi, rq, buf);
@@ -1846,7 +1827,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- if (rq->do_dma)
+ if (rq->vq->premapped)
virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -1961,7 +1942,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- if (rq->do_dma)
+ if (rq->vq->premapped)
virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -4030,7 +4011,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->rq[i].alloc_frag.page) {
- if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+ if (vi->rq[i].vq->premapped && vi->rq[i].last_dma)
virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
}
@@ -4094,11 +4075,13 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
static int virtnet_find_vqs(struct virtnet_info *vi)
{
+ struct virtio_vq_config cfg = {};
vq_callback_t **callbacks;
struct virtqueue **vqs;
int ret = -ENOMEM;
int i, total_vqs;
const char **names;
+ bool *premapped;
bool *ctx;
/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
@@ -4122,8 +4105,13 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
goto err_ctx;
+
+ premapped = kcalloc(total_vqs, sizeof(*premapped), GFP_KERNEL);
+ if (!premapped)
+ goto err_premapped;
} else {
ctx = NULL;
+ premapped = NULL;
}
/* Parameters for control virtqueue, if any */
@@ -4142,10 +4130,19 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
names[txq2vq(i)] = vi->sq[i].name;
if (ctx)
ctx[rxq2vq(i)] = true;
+
+ if (premapped)
+ premapped[rxq2vq(i)] = true;
}
- ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
- names, ctx, NULL);
+ cfg.nvqs = total_vqs;
+ cfg.vqs = vqs;
+ cfg.callbacks = callbacks;
+ cfg.names = names;
+ cfg.ctx = ctx;
+ cfg.premapped = premapped;
+
+ ret = virtio_find_vqs_cfg(vi->vdev, &cfg);
if (ret)
goto err_find;
@@ -4165,6 +4162,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
err_find:
+ kfree(premapped);
+err_premapped:
kfree(ctx);
err_ctx:
kfree(names);
@@ -4234,8 +4233,6 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
- virtnet_rq_set_premapped(vi);
-
cpus_read_lock();
virtnet_set_affinity(vi);
cpus_read_unlock();
@@ -256,21 +256,9 @@ int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
}
static inline
-int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
- struct irq_affinity *desc)
+int virtio_find_vqs_cfg(struct virtio_device *vdev, struct virtio_vq_config *cfg)
{
- struct virtio_vq_config cfg = {};
-
- cfg.nvqs = nvqs;
- cfg.vqs = vqs;
- cfg.callbacks = callbacks;
- cfg.names = names;
- cfg.ctx = ctx;
- cfg.desc = desc;
-
- return vdev->config->find_vqs(vdev, &cfg);
+ return vdev->config->find_vqs(vdev, cfg);
}
/**
Now, the virtio core can set the premapped mode from within find_vqs().
When premapped mode is enabled this way, the core does not allocate its
internal dma array. So switch virtio-net to the find_vqs() API to enable
premapped mode, and judge the premapped mode by vq->premapped instead of
keeping a local do_dma variable.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c      | 57 +++++++++++++++++------------------
 include/linux/virtio_config.h | 16 ++--------
 2 files changed, 29 insertions(+), 44 deletions(-)
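
For context, a minimal sketch of how a driver can request premapped RX
queues through the new interface; the two-queue layout and the
foo_rx_done()/foo_tx_done() callbacks are illustrative placeholders,
not part of this patch:

/* Hypothetical two-queue driver; names and callbacks are invented. */
static int foo_find_vqs(struct virtio_device *vdev, struct virtqueue *vqs[2])
{
	vq_callback_t *callbacks[2] = { foo_rx_done, foo_tx_done };
	const char *names[2] = { "foo-rx", "foo-tx" };
	bool premapped[2] = { true, false };	/* premap the RX vq only */
	struct virtio_vq_config cfg = {};

	cfg.nvqs = 2;
	cfg.vqs = vqs;
	cfg.callbacks = callbacks;
	cfg.names = names;
	cfg.premapped = premapped;	/* core sets vq->premapped at setup */

	return virtio_find_vqs_cfg(vdev, &cfg);
}

After a successful return, the driver checks vq->premapped per queue
rather than tracking the state itself, matching what virtio-net does
above.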