
[v3,18/26] page_pool: Allow page_pool_recycle_direct() to take a netmem or a page

Message ID: 20230111042214.907030-19-willy@infradead.org
State: New
Series: Split netmem from struct page

Commit Message

Matthew Wilcox Jan. 11, 2023, 4:22 a.m. UTC
With no better name for a variant of page_pool_recycle_direct() which
takes a netmem instead of a page, use _Generic() to allow it to take
either a page or a netmem argument.  It's a bit ugly, but maybe not
the worst alternative?

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
---
 include/net/page_pool.h | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
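
For readers less familiar with C11 type-generic selection, here is a minimal,
self-contained sketch of the mechanism the patch relies on.  The struct and
function names are illustrative only, not the kernel code:

        #include <stdio.h>

        struct netmem { int id; };
        struct page   { int id; };

        static void recycle_netmem(struct netmem *n) { printf("netmem %d\n", n->id); }
        static void recycle_page(struct page *p)     { printf("page %d\n", p->id); }

        /* Select the helper by the argument's static type, then call it. */
        #define recycle(mem) _Generic((mem),                    \
                struct netmem *: recycle_netmem,                \
                struct page *:   recycle_page)(mem)

        int main(void)
        {
                struct netmem n = { 1 };
                struct page p = { 2 };

                recycle(&n);    /* resolves to recycle_netmem() at compile time */
                recycle(&p);    /* resolves to recycle_page() at compile time */
                return 0;
        }

Note that the kernel macro below instead places a full call in each
association, which is why it needs the explicit casts: every association's
expression must type-check, even though only the selected one is ever
evaluated.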

Comments

kernel test robot Jan. 11, 2023, 12:48 p.m. UTC | #1
Hi Matthew,

I love your patch! Yet something to improve:

[auto build test ERROR on bpf-next/master]
[also build test ERROR on bpf/master net/master net-next/master linus/master v6.2-rc3 next-20230111]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Matthew-Wilcox-Oracle/netmem-Create-new-type/20230111-122554
base:   https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link:    https://lore.kernel.org/r/20230111042214.907030-19-willy%40infradead.org
patch subject: [PATCH v3 18/26] page_pool: Allow page_pool_recycle_direct() to take a netmem or a page
config: i386-randconfig-a013
compiler: clang version 14.0.6 (https://github.com/llvm/llvm-project f28c006a5895fc0e329fe15fead81e37457cb1d1)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/4990e37aed83df2ccca87e95966949b74056dbfe
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Matthew-Wilcox-Oracle/netmem-Create-new-type/20230111-122554
        git checkout 4990e37aed83df2ccca87e95966949b74056dbfe
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=i386 olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=i386 SHELL=/bin/bash drivers/net/ethernet/broadcom/bnxt/

If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

>> drivers/net/ethernet/broadcom/bnxt/bnxt.c:2978:4: error: controlling expression type 'void *' not compatible with any generic association type
                           page_pool_recycle_direct(rxr->page_pool, data);
                           ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   include/net/page_pool.h:497:54: note: expanded from macro 'page_pool_recycle_direct'
   #define page_pool_recycle_direct(pool, mem)     _Generic((mem),         \
                                                            ^~~~~
   drivers/net/ethernet/broadcom/bnxt/bnxt.c:12677:44: warning: shift count >= width of type [-Wshift-count-overflow]
           if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
                                                     ^~~~~~~~~~~~~~~~
   include/linux/dma-mapping.h:76:54: note: expanded from macro 'DMA_BIT_MASK'
   #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
                                                        ^ ~~~
   1 warning and 1 error generated.
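
The root cause: _Generic() matches on the static type of the controlling
expression (after lvalue conversion), and a 'void *' argument is not
implicitly converted to 'struct page *' for association matching.  The bnxt
call site passes a 'void *data', so no association applies.  A minimal sketch
of the rule, again with illustrative code rather than the kernel's:

        struct page;    /* a pointer to an incomplete type is fine here */

        int classify(void *data)
        {
                /* With only the 'struct page *' association, this would
                 * not compile -- nothing matches 'void *', which is the
                 * bnxt failure above.  A 'void *' (or 'default')
                 * association makes it well-formed: */
                return _Generic(data,
                                struct page *: 1,
                                void *:        2);      /* selects 2 */
        }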


vim +2978 drivers/net/ethernet/broadcom/bnxt/bnxt.c

c0c050c58d8409 Michael Chan       2015-10-22  2931  
975bc99a4a397d Michael Chan       2020-10-04  2932  static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
c0c050c58d8409 Michael Chan       2015-10-22  2933  {
975bc99a4a397d Michael Chan       2020-10-04  2934  	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
c0c050c58d8409 Michael Chan       2015-10-22  2935  	struct pci_dev *pdev = bp->pdev;
975bc99a4a397d Michael Chan       2020-10-04  2936  	struct bnxt_tpa_idx_map *map;
975bc99a4a397d Michael Chan       2020-10-04  2937  	int i, max_idx, max_agg_idx;
c0c050c58d8409 Michael Chan       2015-10-22  2938  
c0c050c58d8409 Michael Chan       2015-10-22  2939  	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
c0c050c58d8409 Michael Chan       2015-10-22  2940  	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
975bc99a4a397d Michael Chan       2020-10-04  2941  	if (!rxr->rx_tpa)
975bc99a4a397d Michael Chan       2020-10-04  2942  		goto skip_rx_tpa_free;
c0c050c58d8409 Michael Chan       2015-10-22  2943  
975bc99a4a397d Michael Chan       2020-10-04  2944  	for (i = 0; i < bp->max_tpa; i++) {
975bc99a4a397d Michael Chan       2020-10-04  2945  		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
c0c050c58d8409 Michael Chan       2015-10-22  2946  		u8 *data = tpa_info->data;
c0c050c58d8409 Michael Chan       2015-10-22  2947  
c0c050c58d8409 Michael Chan       2015-10-22  2948  		if (!data)
c0c050c58d8409 Michael Chan       2015-10-22  2949  			continue;
c0c050c58d8409 Michael Chan       2015-10-22  2950  
975bc99a4a397d Michael Chan       2020-10-04  2951  		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
975bc99a4a397d Michael Chan       2020-10-04  2952  				       bp->rx_buf_use_size, bp->rx_dir,
c519fe9a4f0d1a Shannon Nelson     2017-05-09  2953  				       DMA_ATTR_WEAK_ORDERING);
c0c050c58d8409 Michael Chan       2015-10-22  2954  
c0c050c58d8409 Michael Chan       2015-10-22  2955  		tpa_info->data = NULL;
c0c050c58d8409 Michael Chan       2015-10-22  2956  
720908e5f816d5 Jakub Kicinski     2021-12-27  2957  		skb_free_frag(data);
c0c050c58d8409 Michael Chan       2015-10-22  2958  	}
c0c050c58d8409 Michael Chan       2015-10-22  2959  
975bc99a4a397d Michael Chan       2020-10-04  2960  skip_rx_tpa_free:
1affc01fdc6035 Edwin Peer         2021-09-12  2961  	if (!rxr->rx_buf_ring)
1affc01fdc6035 Edwin Peer         2021-09-12  2962  		goto skip_rx_buf_free;
1affc01fdc6035 Edwin Peer         2021-09-12  2963  
975bc99a4a397d Michael Chan       2020-10-04  2964  	for (i = 0; i < max_idx; i++) {
975bc99a4a397d Michael Chan       2020-10-04  2965  		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3ed3a83e3f3871 Michael Chan       2017-03-28  2966  		dma_addr_t mapping = rx_buf->mapping;
6bb19474391d17 Michael Chan       2017-02-06  2967  		void *data = rx_buf->data;
c0c050c58d8409 Michael Chan       2015-10-22  2968  
c0c050c58d8409 Michael Chan       2015-10-22  2969  		if (!data)
c0c050c58d8409 Michael Chan       2015-10-22  2970  			continue;
c0c050c58d8409 Michael Chan       2015-10-22  2971  
c0c050c58d8409 Michael Chan       2015-10-22  2972  		rx_buf->data = NULL;
3ed3a83e3f3871 Michael Chan       2017-03-28  2973  		if (BNXT_RX_PAGE_MODE(bp)) {
3ed3a83e3f3871 Michael Chan       2017-03-28  2974  			mapping -= bp->rx_dma_offset;
975bc99a4a397d Michael Chan       2020-10-04  2975  			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
975bc99a4a397d Michael Chan       2020-10-04  2976  					     bp->rx_dir,
c519fe9a4f0d1a Shannon Nelson     2017-05-09  2977  					     DMA_ATTR_WEAK_ORDERING);
322b87ca55f2f3 Andy Gospodarek    2019-07-08 @2978  			page_pool_recycle_direct(rxr->page_pool, data);
3ed3a83e3f3871 Michael Chan       2017-03-28  2979  		} else {
c519fe9a4f0d1a Shannon Nelson     2017-05-09  2980  			dma_unmap_single_attrs(&pdev->dev, mapping,
975bc99a4a397d Michael Chan       2020-10-04  2981  					       bp->rx_buf_use_size, bp->rx_dir,
c519fe9a4f0d1a Shannon Nelson     2017-05-09  2982  					       DMA_ATTR_WEAK_ORDERING);
720908e5f816d5 Jakub Kicinski     2021-12-27  2983  			skb_free_frag(data);
c0c050c58d8409 Michael Chan       2015-10-22  2984  		}
3ed3a83e3f3871 Michael Chan       2017-03-28  2985  	}
1affc01fdc6035 Edwin Peer         2021-09-12  2986  
1affc01fdc6035 Edwin Peer         2021-09-12  2987  skip_rx_buf_free:
1affc01fdc6035 Edwin Peer         2021-09-12  2988  	if (!rxr->rx_agg_ring)
1affc01fdc6035 Edwin Peer         2021-09-12  2989  		goto skip_rx_agg_free;
1affc01fdc6035 Edwin Peer         2021-09-12  2990  
975bc99a4a397d Michael Chan       2020-10-04  2991  	for (i = 0; i < max_agg_idx; i++) {
975bc99a4a397d Michael Chan       2020-10-04  2992  		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
c0c050c58d8409 Michael Chan       2015-10-22  2993  		struct page *page = rx_agg_buf->page;
c0c050c58d8409 Michael Chan       2015-10-22  2994  
c0c050c58d8409 Michael Chan       2015-10-22  2995  		if (!page)
c0c050c58d8409 Michael Chan       2015-10-22  2996  			continue;
c0c050c58d8409 Michael Chan       2015-10-22  2997  
9a6aa350488533 Andy Gospodarek    2022-04-08  2998  		if (BNXT_RX_PAGE_MODE(bp)) {
9a6aa350488533 Andy Gospodarek    2022-04-08  2999  			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
9a6aa350488533 Andy Gospodarek    2022-04-08  3000  					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
9a6aa350488533 Andy Gospodarek    2022-04-08  3001  					     DMA_ATTR_WEAK_ORDERING);
9a6aa350488533 Andy Gospodarek    2022-04-08  3002  			rx_agg_buf->page = NULL;
9a6aa350488533 Andy Gospodarek    2022-04-08  3003  			__clear_bit(i, rxr->rx_agg_bmap);
9a6aa350488533 Andy Gospodarek    2022-04-08  3004  
9a6aa350488533 Andy Gospodarek    2022-04-08  3005  			page_pool_recycle_direct(rxr->page_pool, page);
9a6aa350488533 Andy Gospodarek    2022-04-08  3006  		} else {
c519fe9a4f0d1a Shannon Nelson     2017-05-09  3007  			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
df70303dd14623 Christophe JAILLET 2021-08-22  3008  					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
c519fe9a4f0d1a Shannon Nelson     2017-05-09  3009  					     DMA_ATTR_WEAK_ORDERING);
c0c050c58d8409 Michael Chan       2015-10-22  3010  			rx_agg_buf->page = NULL;
975bc99a4a397d Michael Chan       2020-10-04  3011  			__clear_bit(i, rxr->rx_agg_bmap);
c0c050c58d8409 Michael Chan       2015-10-22  3012  
c0c050c58d8409 Michael Chan       2015-10-22  3013  			__free_page(page);
c0c050c58d8409 Michael Chan       2015-10-22  3014  		}
9a6aa350488533 Andy Gospodarek    2022-04-08  3015  	}
1affc01fdc6035 Edwin Peer         2021-09-12  3016  
1affc01fdc6035 Edwin Peer         2021-09-12  3017  skip_rx_agg_free:
89d0a06c516339 Michael Chan       2016-04-25  3018  	if (rxr->rx_page) {
89d0a06c516339 Michael Chan       2016-04-25  3019  		__free_page(rxr->rx_page);
89d0a06c516339 Michael Chan       2016-04-25  3020  		rxr->rx_page = NULL;
89d0a06c516339 Michael Chan       2016-04-25  3021  	}
ec4d8e7cf024e4 Michael Chan       2019-07-29  3022  	map = rxr->rx_tpa_idx_map;
ec4d8e7cf024e4 Michael Chan       2019-07-29  3023  	if (map)
ec4d8e7cf024e4 Michael Chan       2019-07-29  3024  		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
c0c050c58d8409 Michael Chan       2015-10-22  3025  }
975bc99a4a397d Michael Chan       2020-10-04  3026
Matthew Wilcox Jan. 11, 2023, 1:43 p.m. UTC | #2
On Wed, Jan 11, 2023 at 08:48:30PM +0800, kernel test robot wrote:
> >> drivers/net/ethernet/broadcom/bnxt/bnxt.c:2978:4: error: controlling expression type 'void *' not compatible with any generic association type
>                            page_pool_recycle_direct(rxr->page_pool, data);
>                            ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

I swear I did an allmodconfig build ... don't know how I missed this
one.  I think I'll make the page_pool_recycle_direct() macro accept
void * as well, and treat it as a page.  Once we finish the conversion
to netmem, this problem will go away.

ie this:

+++ b/include/net/page_pool.h
@@ -485,7 +485,8 @@ static inline void __page_pool_recycle_page_direct(struct page_pool *pool,
 
 #define page_pool_recycle_direct(pool, mem)    _Generic((mem),         \
        struct netmem *: __page_pool_recycle_direct(pool, (struct netmem *)mem),                \
-       struct page *:   __page_pool_recycle_page_direct(pool, (struct page *)mem))
+       struct page *:   __page_pool_recycle_page_direct(pool, (struct page *)mem),     \
+       void *:  __page_pool_recycle_page_direct(pool, (struct page *)mem))
 
 #define PAGE_POOL_DMA_USE_PP_FRAG_COUNT        \
                (sizeof(dma_addr_t) > sizeof(unsigned long))
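
One way to read the choice of a 'void *' association over 'default:': with
'void *', a caller passing any other concrete pointer type (say, 'char *')
still fails to compile, so the macro keeps catching genuine type mismatches,
whereas 'default:' would silently accept everything.  The cast to
'struct page *' inside the branch is required either way, since each branch
expression must type-check even when it is not the one selected.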
Jesper Dangaard Brouer Jan. 12, 2023, 8:45 a.m. UTC | #3
On 11/01/2023 14.43, Matthew Wilcox wrote:
> On Wed, Jan 11, 2023 at 08:48:30PM +0800, kernel test robot wrote:
>>>> drivers/net/ethernet/broadcom/bnxt/bnxt.c:2978:4: error: controlling expression type 'void *' not compatible with any generic association type
>>                             page_pool_recycle_direct(rxr->page_pool, data);
>>                             ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> 
> I swear I did an allmodconfig build ... don't know how I missed this
> one.  I think I'll make the page_pool_recycle_direct() macro accept
> void * as well, and treat it as a page.  Once we finish the conversion
> to netmem, this problem will go away.
> 
> ie this:
> 
> +++ b/include/net/page_pool.h
> @@ -485,7 +485,8 @@ static inline void __page_pool_recycle_page_direct(struct page_pool *pool,
>   
>   #define page_pool_recycle_direct(pool, mem)    _Generic((mem),         \
>          struct netmem *: __page_pool_recycle_direct(pool, (struct netmem *)mem),                \
> -       struct page *:   __page_pool_recycle_page_direct(pool, (struct page *)mem))
> +       struct page *:   __page_pool_recycle_page_direct(pool, (struct page *)mem),     \
> +       void *:  __page_pool_recycle_page_direct(pool, (struct page *)mem))
>   

I'm okay with this change, and you can add my Acked-by.

Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>

Maybe the broadcom/bnxt driver should (later) be converted not to use void
pointers... cc. Andy.

--Jesper

Patch

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index e205eaed21a5..64ac397dcd9f 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -482,12 +482,22 @@  static inline void page_pool_put_full_page(struct page_pool *pool,
 }
 
 /* Same as above but the caller must guarantee safe context. e.g NAPI */
-static inline void page_pool_recycle_direct(struct page_pool *pool,
+static inline void __page_pool_recycle_direct(struct page_pool *pool,
+					    struct netmem *nmem)
+{
+	page_pool_put_full_netmem(pool, nmem, true);
+}
+
+static inline void __page_pool_recycle_page_direct(struct page_pool *pool,
 					    struct page *page)
 {
-	page_pool_put_full_page(pool, page, true);
+	page_pool_put_full_netmem(pool, page_netmem(page), true);
 }
 
+#define page_pool_recycle_direct(pool, mem)	_Generic((mem),		\
+	struct netmem *: __page_pool_recycle_direct(pool, (struct netmem *)mem),		\
+	struct page *:	 __page_pool_recycle_page_direct(pool, (struct page *)mem))
+
 #define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
 		(sizeof(dma_addr_t) > sizeof(unsigned long))
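
For illustration, a hypothetical caller against the patched header, in a
context where direct recycling is allowed (e.g. NAPI); page_pool_dev_alloc_pages()
is the existing allocation helper, and page_netmem() comes from earlier in
this series:

        struct page *page = page_pool_dev_alloc_pages(pool);
        struct netmem *nmem = page_netmem(page_pool_dev_alloc_pages(pool));

        page_pool_recycle_direct(pool, page);   /* page association */
        page_pool_recycle_direct(pool, nmem);   /* netmem association */

Both calls compile and dispatch at compile time; with the follow-up fix
discussed above, a legacy 'void *' buffer pointer would take the page path
as well.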