[10/33] xsk: support virtio DMA map

Message ID: 20230202110058.130695-11-xuanzhuo@linux.alibaba.com
State: Changes Requested
Delegated to: Netdev Maintainers
Series: virtio-net: support AF_XDP zero copy

Checks

Context Check Description
netdev/tree_selection success Guessed tree name to be net-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count fail Series longer than 15 patches
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 2 this patch: 2
netdev/cc_maintainers success CCed 14 of 14 maintainers
netdev/build_clang success Errors and warnings before: 1 this patch: 1
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 2 this patch: 2
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns
        WARNING: line length of 83 exceeds 80 columns
        WARNING: line length of 84 exceeds 80 columns
        WARNING: line length of 88 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Xuan Zhuo Feb. 2, 2023, 11 a.m. UTC
When the device is a virtio device, use virtio's DMA interface instead of the generic DMA API.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 net/xdp/xsk_buff_pool.c | 59 +++++++++++++++++++++++++++++++----------
 1 file changed, 45 insertions(+), 14 deletions(-)
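
The choice between the two DMA paths below is driven entirely by the struct device handed to xp_dma_map(). A minimal sketch of the intended driver-side call, assuming virtio-net passes the virtio device itself (the vi->vdev->dev caller shown here is hypothetical shorthand, not taken from this patch):

	/* Hypothetical caller: passing the virtio device makes
	 * is_virtio_device() true inside xp_dma_map(), so the pool is
	 * populated with the virtio_dma_* callbacks from this series.
	 */
	struct virtnet_info *vi = netdev_priv(netdev);
	int err;

	err = xp_dma_map(pool, &vi->vdev->dev, 0 /* attrs */, pages, nr_pages);
	if (err)
		return err;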

Comments

kernel test robot Feb. 5, 2023, 10:04 p.m. UTC | #1
Hi Xuan,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on net-next/master]
[also build test ERROR on mst-vhost/linux-next linus/master v6.2-rc6 next-20230203]
[cannot apply to net/master]
[If your patch is applied to the wrong git tree, kindly drop us a note.
When submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Xuan-Zhuo/virtio_ring-virtqueue_add-support-premapped/20230202-190707
patch link:    https://lore.kernel.org/r/20230202110058.130695-11-xuanzhuo%40linux.alibaba.com
patch subject: [PATCH 10/33] xsk: support virtio DMA map
config: i386-debian-10.3-kvm (https://download.01.org/0day-ci/archive/20230206/202302060542.IxBGSiKh-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
        # https://github.com/intel-lab-lkp/linux/commit/370aefebcea755f7c4c14e16f8dcb5540769fd26
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Xuan-Zhuo/virtio_ring-virtqueue_add-support-premapped/20230202-190707
        git checkout 370aefebcea755f7c4c14e16f8dcb5540769fd26
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        make W=1 O=build_dir ARCH=i386 olddefconfig
        make W=1 O=build_dir ARCH=i386 SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable:
| Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   ld: net/xdp/xsk_buff_pool.o: in function `xp_alloc':
>> net/xdp/xsk_buff_pool.c:575: undefined reference to `is_virtio_device'
>> ld: net/xdp/xsk_buff_pool.c:576: undefined reference to `virtio_dma_sync_signle_range_for_device'
   ld: net/xdp/xsk_buff_pool.o: in function `__xp_dma_unmap':
   net/xdp/xsk_buff_pool.c:338: undefined reference to `is_virtio_device'
>> ld: net/xdp/xsk_buff_pool.c:339: undefined reference to `virtio_dma_unmap'
   ld: net/xdp/xsk_buff_pool.o: in function `xp_dma_map':
   net/xdp/xsk_buff_pool.c:443: undefined reference to `is_virtio_device'
   ld: net/xdp/xsk_buff_pool.c:443: undefined reference to `virtio_dma_sync_signle_range_for_device'
>> ld: net/xdp/xsk_buff_pool.c:443: undefined reference to `virtio_dma_sync_signle_range_for_cpu'
>> ld: net/xdp/xsk_buff_pool.c:458: undefined reference to `virtio_dma_map_page'
>> ld: net/xdp/xsk_buff_pool.c:461: undefined reference to `virtio_dma_mapping_error'
>> ld: net/xdp/xsk_buff_pool.c:464: undefined reference to `virtio_dma_need_sync'
>> ld: net/xdp/xsk_buff_pool.c:457: undefined reference to `is_virtio_device'
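
All of the unresolved symbols are the virtio DMA helpers this series introduces, so this i386 config evidently links xsk_buff_pool.o without the virtio core that defines them. The conventional kernel fix is for the header to provide inline stubs when the virtio core is not reachable from the caller; a sketch of that pattern (hypothetical, not part of this patch, and each virtio_dma_* helper used here would need an equivalent stub):

	/* In a header such as linux/virtio.h: real declaration when the
	 * virtio core is reachable, constant-false stub otherwise so the
	 * is_virtio_device() branches compile away.
	 */
	#if IS_REACHABLE(CONFIG_VIRTIO)
	bool is_virtio_device(struct device *dev);
	#else
	static inline bool is_virtio_device(struct device *dev)
	{
		return false;
	}
	#endif

With a compile-time-false is_virtio_device(), the compiler can eliminate the virtio branches so the undefined references disappear at link time, though the virtio_dma_* calls still need at least a declaration or stub to compile.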


vim +575 net/xdp/xsk_buff_pool.c

   424	
   425	int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
   426		       unsigned long attrs, struct page **pages, u32 nr_pages)
   427	{
   428		struct xsk_dma_map *dma_map;
   429		dma_addr_t dma;
   430		int err;
   431		u32 i;
   432	
   433		dma_map = xp_find_dma_map(pool);
   434		if (dma_map) {
   435			err = xp_init_dma_info(pool, dma_map);
   436			if (err)
   437				return err;
   438	
   439			refcount_inc(&dma_map->users);
   440			return 0;
   441		}
   442	
 > 443		if (is_virtio_device(dev)) {
   444			pool->dma_sync_for_cpu = virtio_dma_sync_signle_range_for_cpu;
   445			pool->dma_sync_for_device = virtio_dma_sync_signle_range_for_device;
   446	
   447		} else {
   448			pool->dma_sync_for_cpu = dma_sync_for_cpu;
   449			pool->dma_sync_for_device = dma_sync_for_device;
   450		}
   451	
   452		dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
   453		if (!dma_map)
   454			return -ENOMEM;
   455	
   456		for (i = 0; i < dma_map->dma_pages_cnt; i++) {
 > 457			if (is_virtio_device(dev)) {
 > 458				dma = virtio_dma_map_page(dev, pages[i], 0, PAGE_SIZE,
   459							  DMA_BIDIRECTIONAL);
   460	
 > 461				if (virtio_dma_mapping_error(dev, dma))
   462					goto err;
   463	
 > 464				if (virtio_dma_need_sync(dev, dma))
   465					dma_map->dma_need_sync = true;
   466	
   467			} else {
   468				dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
   469							 DMA_BIDIRECTIONAL, attrs);
   470	
   471				if (dma_mapping_error(dev, dma))
   472					goto err;
   473	
   474				if (dma_need_sync(dev, dma))
   475					dma_map->dma_need_sync = true;
   476			}
   477			dma_map->dma_pages[i] = dma;
   478		}
   479	
   480		if (pool->unaligned)
   481			xp_check_dma_contiguity(dma_map);
   482	
   483		err = xp_init_dma_info(pool, dma_map);
   484		if (err) {
   485			__xp_dma_unmap(dma_map, attrs);
   486			return err;
   487		}
   488	
   489		return 0;
   490	err:
   491		__xp_dma_unmap(dma_map, attrs);
   492		return -ENOMEM;
   493	}
   494	EXPORT_SYMBOL(xp_dma_map);
   495	
   496	static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
   497						  u64 addr)
   498	{
   499		return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
   500	}
   501	
   502	static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
   503	{
   504		*addr = xp_unaligned_extract_addr(*addr);
   505		if (*addr >= pool->addrs_cnt ||
   506		    *addr + pool->chunk_size > pool->addrs_cnt ||
   507		    xp_addr_crosses_non_contig_pg(pool, *addr))
   508			return false;
   509		return true;
   510	}
   511	
   512	static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
   513	{
   514		*addr = xp_aligned_extract_addr(pool, *addr);
   515		return *addr < pool->addrs_cnt;
   516	}
   517	
   518	static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
   519	{
   520		struct xdp_buff_xsk *xskb;
   521		u64 addr;
   522		bool ok;
   523	
   524		if (pool->free_heads_cnt == 0)
   525			return NULL;
   526	
   527		for (;;) {
   528			if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
   529				pool->fq->queue_empty_descs++;
   530				return NULL;
   531			}
   532	
   533			ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
   534			     xp_check_aligned(pool, &addr);
   535			if (!ok) {
   536				pool->fq->invalid_descs++;
   537				xskq_cons_release(pool->fq);
   538				continue;
   539			}
   540			break;
   541		}
   542	
   543		if (pool->unaligned) {
   544			xskb = pool->free_heads[--pool->free_heads_cnt];
   545			xp_init_xskb_addr(xskb, pool, addr);
   546			if (pool->dma_pages_cnt)
   547				xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
   548		} else {
   549			xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
   550		}
   551	
   552		xskq_cons_release(pool->fq);
   553		return xskb;
   554	}
   555	
   556	struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
   557	{
   558		struct xdp_buff_xsk *xskb;
   559	
   560		if (!pool->free_list_cnt) {
   561			xskb = __xp_alloc(pool);
   562			if (!xskb)
   563				return NULL;
   564		} else {
   565			pool->free_list_cnt--;
   566			xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
   567						free_list_node);
   568			list_del_init(&xskb->free_list_node);
   569		}
   570	
   571		xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
   572		xskb->xdp.data_meta = xskb->xdp.data;
   573	
   574		if (pool->dma_need_sync) {
 > 575			if (is_virtio_device(pool->dev))
 > 576				virtio_dma_sync_signle_range_for_device(pool->dev, xskb->dma, 0,
   577									pool->frame_len,
   578									DMA_BIDIRECTIONAL);
   579			else
   580				dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
   581								 pool->frame_len,
   582								 DMA_BIDIRECTIONAL);
   583		}
   584		return &xskb->xdp;
   585	}
   586	EXPORT_SYMBOL(xp_alloc);
   587
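
One observation on the context above: xp_dma_map() already records the appropriate sync routine in pool->dma_sync_for_device, yet xp_alloc() re-tests is_virtio_device() on every allocation. A sketch of the indirect-call alternative, assuming the callback keeps the signature it is assigned with in this patch:

	/* Sketch: let the callback chosen once in xp_dma_map() do the
	 * dispatch instead of branching on the device type per allocation.
	 */
	if (pool->dma_need_sync)
		pool->dma_sync_for_device(pool->dev, xskb->dma, 0,
					  pool->frame_len,
					  DMA_BIDIRECTIONAL);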

Patch

diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 78e325e195fa..e2785aca8396 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -3,6 +3,7 @@ 
 #include <net/xsk_buff_pool.h>
 #include <net/xdp_sock.h>
 #include <net/xdp_sock_drv.h>
+#include <linux/virtio.h>
 
 #include "xsk_queue.h"
 #include "xdp_umem.h"
@@ -334,8 +335,12 @@  static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
 		dma = &dma_map->dma_pages[i];
 		if (*dma) {
 			*dma &= ~XSK_NEXT_PG_CONTIG_MASK;
-			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
-					     DMA_BIDIRECTIONAL, attrs);
+			if (is_virtio_device(dma_map->dev))
+				virtio_dma_unmap(dma_map->dev, *dma, PAGE_SIZE,
+						 DMA_BIDIRECTIONAL);
+			else
+				dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
+						     DMA_BIDIRECTIONAL, attrs);
 			*dma = 0;
 		}
 	}
@@ -435,22 +440,40 @@  int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 		return 0;
 	}
 
-	pool->dma_sync_for_cpu = dma_sync_for_cpu;
-	pool->dma_sync_for_device = dma_sync_for_device;
+	if (is_virtio_device(dev)) {
+		pool->dma_sync_for_cpu = virtio_dma_sync_signle_range_for_cpu;
+		pool->dma_sync_for_device = virtio_dma_sync_signle_range_for_device;
+
+	} else {
+		pool->dma_sync_for_cpu = dma_sync_for_cpu;
+		pool->dma_sync_for_device = dma_sync_for_device;
+	}
 
 	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
 	if (!dma_map)
 		return -ENOMEM;
 
 	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
-		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
-					 DMA_BIDIRECTIONAL, attrs);
-		if (dma_mapping_error(dev, dma)) {
-			__xp_dma_unmap(dma_map, attrs);
-			return -ENOMEM;
+		if (is_virtio_device(dev)) {
+			dma = virtio_dma_map_page(dev, pages[i], 0, PAGE_SIZE,
+						  DMA_BIDIRECTIONAL);
+
+			if (virtio_dma_mapping_error(dev, dma))
+				goto err;
+
+			if (virtio_dma_need_sync(dev, dma))
+				dma_map->dma_need_sync = true;
+
+		} else {
+			dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
+						 DMA_BIDIRECTIONAL, attrs);
+
+			if (dma_mapping_error(dev, dma))
+				goto err;
+
+			if (dma_need_sync(dev, dma))
+				dma_map->dma_need_sync = true;
 		}
-		if (dma_need_sync(dev, dma))
-			dma_map->dma_need_sync = true;
 		dma_map->dma_pages[i] = dma;
 	}
 
@@ -464,6 +487,9 @@  int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 	}
 
 	return 0;
+err:
+	__xp_dma_unmap(dma_map, attrs);
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(xp_dma_map);
 
@@ -546,9 +572,14 @@  struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
 	xskb->xdp.data_meta = xskb->xdp.data;
 
 	if (pool->dma_need_sync) {
-		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
-						 pool->frame_len,
-						 DMA_BIDIRECTIONAL);
+		if (is_virtio_device(pool->dev))
+			virtio_dma_sync_signle_range_for_device(pool->dev, xskb->dma, 0,
+								pool->frame_len,
+								DMA_BIDIRECTIONAL);
+		else
+			dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
+							 pool->frame_len,
+							 DMA_BIDIRECTIONAL);
 	}
 	return &xskb->xdp;
 }