[RFC,03/10] dma-buf: add support for NET_TX pages

Message ID: 20230710223304.1174642-4-almasrymina@google.com
State: New
Series: Device Memory TCP

Commit Message

Mina Almasry July 10, 2023, 10:32 p.m. UTC
Use the paged attachment mapping support to create NET_TX pages.
NET_TX pages can be used in the networking transmit path:

1. Create an iov_iter and an array of bio_vec entries to represent
   this dmabuf.

2. Initialize the bio_vec entries with the backing dmabuf pages.

Signed-off-by: Mina Almasry <almasrymina@google.com>
---
 drivers/dma-buf/dma-buf.c    | 47 ++++++++++++++++++++++++++++++++++++
 include/linux/dma-buf.h      |  7 ++++++
 include/uapi/linux/dma-buf.h |  1 +
 3 files changed, 55 insertions(+)
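
The two steps in the commit message build the iov_iter, but nothing in
this patch consumes it yet; the TX hook lands later in the series. As a
rough illustration only, a kernel-side caller could duplicate the
iterator and hand it to the socket layer as sketched below. Everything
here other than priv->net_tx.iter is an assumption: the helper, its
signature, and the MSG_ZEROCOPY usage are hypothetical.

/* Hypothetical consumer sketch -- not part of this patch. */
#include <linux/dma-buf.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int dma_buf_pages_send(struct socket *sock,
			      struct dma_buf_pages *priv,
			      size_t offset, size_t len)
{
	/* assumed: zerocopy-style send; the real hook comes later */
	struct msghdr msg = { .msg_flags = MSG_ZEROCOPY };
	/* Copy the template iterator; advance/truncate are per-send state. */
	struct iov_iter iter = priv->net_tx.iter;

	iov_iter_advance(&iter, offset);
	iov_iter_truncate(&iter, len);
	msg.msg_iter = iter;

	/*
	 * The pages were ref'd against the pgmap at init time, so they
	 * remain valid for the lifetime of the dma_buf_pages file.
	 */
	return sock_sendmsg(sock, &msg);
}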

Patch

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index acb86bf406f4..3ca71297b9b4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1683,6 +1683,7 @@  static void dma_buf_pages_destroy(struct percpu_ref *ref)
 }
 
 const struct dma_buf_pages_type_ops net_rx_ops;
+const struct dma_buf_pages_type_ops net_tx_ops;
 
 static long dma_buf_create_pages(struct file *file,
 				 struct dma_buf_create_pages_info *create_info)
@@ -1799,6 +1800,9 @@  static long dma_buf_create_pages(struct file *file,
 	case DMA_BUF_PAGES_NET_RX:
 		priv->type_ops = &net_rx_ops;
 		break;
+	case DMA_BUF_PAGES_NET_TX:
+		priv->type_ops = &net_tx_ops;
+		break;
 	default:
 		err = -EINVAL;
 		goto out_put_new_file;
@@ -2140,3 +2144,48 @@  struct page *dma_buf_pages_net_rx_alloc(struct dma_buf_pages *priv)
 	percpu_ref_get(&priv->pgmap.ref);
 	return pg;
 }
+
+/********************************
+ *	dma_buf_pages_net_tx	*
+ ********************************/
+
+static void dma_buf_pages_net_tx_release(struct dma_buf_pages *priv,
+					 struct file *file)
+{
+	int i;
+
+	for (i = 0; i < priv->num_pages; i++)
+		put_page(&priv->pages[i]);
+}
+
+static int dma_buf_pages_net_tx_init(struct dma_buf_pages *priv,
+				     struct file *file)
+{
+	int i;
+
+	priv->net_tx.tx_bv = kvmalloc_array(priv->num_pages,
+					    sizeof(struct bio_vec), GFP_KERNEL);
+	if (!priv->net_tx.tx_bv)
+		return -ENOMEM;
+
+	for (i = 0; i < priv->num_pages; i++) {
+		priv->net_tx.tx_bv[i].bv_page = &priv->pages[i];
+		priv->net_tx.tx_bv[i].bv_offset = 0;
+		priv->net_tx.tx_bv[i].bv_len = PAGE_SIZE;
+	}
+	percpu_ref_get_many(&priv->pgmap.ref, priv->num_pages);
+	iov_iter_bvec(&priv->net_tx.iter, ITER_SOURCE, priv->net_tx.tx_bv,
+		      priv->num_pages, priv->dmabuf->size);
+	return 0;
+}
+
+static void dma_buf_pages_net_tx_free(struct dma_buf_pages *priv)
+{
+	kvfree(priv->net_tx.tx_bv);
+}
+
+const struct dma_buf_pages_type_ops net_tx_ops = {
+	.dma_buf_pages_init		= dma_buf_pages_net_tx_init,
+	.dma_buf_pages_release		= dma_buf_pages_net_tx_release,
+	.dma_buf_pages_destroy		= dma_buf_pages_net_tx_free,
+};
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index e8e66d6407d0..93228a2fec47 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -22,6 +22,7 @@ 
 #include <linux/fs.h>
 #include <linux/dma-fence.h>
 #include <linux/wait.h>
+#include <linux/uio.h>
 #include <linux/genalloc.h>
 #include <linux/xarray.h>
 #include <net/page_pool.h>
@@ -555,6 +556,11 @@  struct dma_buf_pages_type_ops {
 				  struct page *page);
 };
 
+struct dma_buf_pages_net_tx {
+	struct iov_iter iter;
+	struct bio_vec *tx_bv;
+};
+
 struct dma_buf_pages_net_rx {
 	struct gen_pool *page_pool;
 	struct xarray bound_rxq_list;
@@ -579,6 +585,7 @@  struct dma_buf_pages {
 
 	union {
 		struct dma_buf_pages_net_rx net_rx;
+		struct dma_buf_pages_net_tx net_tx;
 	};
 };
 
diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
index b392cef9d3c6..546f211a7556 100644
--- a/include/uapi/linux/dma-buf.h
+++ b/include/uapi/linux/dma-buf.h
@@ -187,6 +187,7 @@  struct dma_buf_create_pages_info {
 };
 
 #define DMA_BUF_PAGES_NET_RX		(1 << 0)
+#define DMA_BUF_PAGES_NET_TX		(2 << 0)
 
 #define DMA_BUF_CREATE_PAGES	_IOW(DMA_BUF_BASE, 4, struct dma_buf_create_pages_info)
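
For completeness, a minimal userspace sketch of requesting NET_TX pages
via the new type. Only DMA_BUF_PAGES_NET_TX and the DMA_BUF_CREATE_PAGES
ioctl appear in this patch; the layout of struct
dma_buf_create_pages_info (device binding, the member names, how the
new fd is returned) is defined earlier in the series, so those details
are assumptions here.

/* Hypothetical userspace sketch; members other than the type are assumed. */
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

int request_net_tx_pages(int dmabuf_fd)
{
	struct dma_buf_create_pages_info info = {
		.type = DMA_BUF_PAGES_NET_TX,	/* assumed member name */
		/* remaining members (set up elsewhere in the series) omitted */
	};

	return ioctl(dmabuf_fd, DMA_BUF_CREATE_PAGES, &info);
}

On success the kernel installs net_tx_ops for the new pages: init takes
a pgmap reference per page, fills one PAGE_SIZE bio_vec per page, and
builds an iov_iter spanning the whole dmabuf.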