@@ -478,7 +478,7 @@ static void *virtnet_rq_get_buf(struct virtnet_rq *rq, u32 *len, void **ctx)
void *buf;
buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
- if (buf && rq->do_dma)
+ if (buf && virtqueue_get_dma_premapped(rq->vq))
virtnet_rq_unmap(rq, buf, *len);
return buf;
@@ -491,7 +491,7 @@ static void virtnet_rq_init_one_sg(struct virtnet_rq *rq, void *buf, u32 len)
u32 offset;
void *head;
- if (!rq->do_dma) {
+ if (!virtqueue_get_dma_premapped(rq->vq)) {
sg_init_one(rq->sg, buf, len);
return;
}
@@ -521,7 +521,7 @@ static void *virtnet_rq_alloc(struct virtnet_rq *rq, u32 size, gfp_t gfp)
head = page_address(alloc_frag->page);
- if (rq->do_dma) {
+ if (virtqueue_get_dma_premapped(rq->vq)) {
dma = head;
/* new pages */
@@ -575,12 +575,8 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
if (!vi->mergeable_rx_bufs && vi->big_packets)
return;
- for (i = 0; i < vi->max_queue_pairs; i++) {
- if (virtqueue_set_dma_premapped(vi->rq[i].vq))
- continue;
-
- vi->rq[i].do_dma = true;
- }
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtqueue_set_dma_premapped(vi->rq[i].vq);
}
static void free_old_xmit(struct virtnet_sq *sq, bool in_napi)
@@ -1638,7 +1634,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct virtnet_rq *rq,
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- if (rq->do_dma)
+ if (virtqueue_get_dma_premapped(rq->vq))
virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -1753,7 +1749,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- if (rq->do_dma)
+ if (virtqueue_get_dma_premapped(rq->vq))
virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -3822,7 +3818,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->rq[i].alloc_frag.page) {
- if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+ if (virtqueue_get_dma_premapped(vi->rq[i].vq) && vi->rq[i].last_dma)
virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
}
@@ -3850,7 +3846,7 @@ static void virtnet_rq_free_unused_bufs(struct virtqueue *vq)
rq = &vi->rq[i];
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (rq->do_dma)
+ if (virtqueue_get_dma_premapped(rq->vq))
virtnet_rq_unmap(rq, buf, 0);
virtnet_rq_free_buf(vi, rq, buf);
@@ -104,9 +104,6 @@ struct virtnet_rq {
/* Record the last dma info to free after new pages is allocated. */
struct virtnet_rq_dma *last_dma;
-
- /* Do dma by self */
- bool do_dma;
};
struct virtnet_info {
@@ -2905,6 +2905,27 @@ int virtqueue_set_dma_premapped(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);
+/**
+ * virtqueue_get_dma_premapped - get the vring premapped mode
+ * @_vq: the struct virtqueue we're talking about.
+ *
+ * Get the premapped mode of the vq.
+ *
+ * Return: true if the vq is in premapped mode, false otherwise.
+ */
+bool virtqueue_get_dma_premapped(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ bool premapped;
+
+ START_USE(vq);
+ premapped = vq->premapped;
+ END_USE(vq);
+
+	return premapped;
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_dma_premapped);
+
/**
* virtqueue_reset - detach and recycle all unused buffers
* @_vq: the struct virtqueue we're talking about.
@@ -98,6 +98,7 @@ bool virtqueue_enable_cb(struct virtqueue *vq);
unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
int virtqueue_set_dma_premapped(struct virtqueue *_vq);
+bool virtqueue_get_dma_premapped(struct virtqueue *_vq);
bool virtqueue_poll(struct virtqueue *vq, unsigned);
Introduce the helper virtqueue_get_dma_premapped(), so the driver can
know whether a DMA unmap is needed.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio/main.c       | 22 +++++++++-------------
 drivers/net/virtio/virtio_net.h |  3 ---
 drivers/virtio/virtio_ring.c    | 21 +++++++++++++++++++++
 include/linux/virtio.h          |  1 +
 4 files changed, 31 insertions(+), 16 deletions(-)
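For illustration, a minimal sketch of the intended call pattern (not part
of the patch; example_setup(), example_complete() and example_unmap() are
hypothetical stand-ins for the driver's real setup, completion and unmap
routines, e.g. virtnet_rq_unmap()):

	#include <linux/virtio.h>

	/*
	 * Opt in to premapped mode once at setup time. The call can fail
	 * (e.g. if buffers are already queued); on failure the vq simply
	 * stays in the default mode, where the virtio core maps and
	 * unmaps the buffers itself.
	 */
	static void example_setup(struct virtqueue *vq)
	{
		virtqueue_set_dma_premapped(vq);
	}

	/*
	 * On the completion path, query the core instead of caching a
	 * private flag such as the removed rq->do_dma.
	 */
	static void example_complete(struct virtqueue *vq, void *buf, u32 len)
	{
		/* Unmap only when the driver owns the DMA mapping. */
		if (virtqueue_get_dma_premapped(vq))
			example_unmap(buf, len);	/* hypothetical helper */
	}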