diff mbox series

[v2,06/13] fs: Add gfp_t parameter to create_page_buffers()

Message ID 20220218195739.585044-7-shr@fb.com (mailing list archive)
State New, archived
Headers show
Series Support sync buffered writes for io-uring | expand

Commit Message

Stefan Roesch Feb. 18, 2022, 7:57 p.m. UTC
This adds the gfp_t parameter to the create_page_buffers function.
This allows the caller to specify the required allocation (gfp) flags
for the buffer-head allocation.

Signed-off-by: Stefan Roesch <shr@fb.com>
---
 fs/buffer.c | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)

Comments

kernel test robot Feb. 21, 2022, 12:18 a.m. UTC | #1
Hi Stefan,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on 9195e5e0adbb8a9a5ee9ef0f9dedf6340d827405]

url:    https://github.com/0day-ci/linux/commits/Stefan-Roesch/Support-sync-buffered-writes-for-io-uring/20220220-172629
base:   9195e5e0adbb8a9a5ee9ef0f9dedf6340d827405
config: sparc64-randconfig-s031-20220220 (https://download.01.org/0day-ci/archive/20220221/202202210828.SR411CM4-lkp@intel.com/config)
compiler: sparc64-linux-gcc (GCC) 11.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # apt-get install sparse
        # sparse version: v0.6.4-dirty
        # https://github.com/0day-ci/linux/commit/e98a7c2a17960f81efc5968cbc386af7c088a8ed
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Stefan-Roesch/Support-sync-buffered-writes-for-io-uring/20220220-172629
        git checkout e98a7c2a17960f81efc5968cbc386af7c088a8ed
        # save the config file to linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=sparc64 SHELL=/bin/bash

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>


sparse warnings: (new ones prefixed by >>)
>> fs/buffer.c:2010:60: sparse: sparse: incorrect type in argument 4 (different base types) @@     expected restricted gfp_t [usertype] flags @@     got unsigned int flags @@
   fs/buffer.c:2010:60: sparse:     expected restricted gfp_t [usertype] flags
   fs/buffer.c:2010:60: sparse:     got unsigned int flags
>> fs/buffer.c:2147:87: sparse: sparse: incorrect type in argument 6 (different base types) @@     expected unsigned int flags @@     got restricted gfp_t [assigned] [usertype] gfp @@
   fs/buffer.c:2147:87: sparse:     expected unsigned int flags
   fs/buffer.c:2147:87: sparse:     got restricted gfp_t [assigned] [usertype] gfp

vim +2010 fs/buffer.c

  1991	
  1992	int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
  1993				get_block_t *get_block, const struct iomap *iomap,
  1994				unsigned int flags)
  1995	{
  1996		unsigned from = pos & (PAGE_SIZE - 1);
  1997		unsigned to = from + len;
  1998		struct inode *inode = folio->mapping->host;
  1999		unsigned block_start, block_end;
  2000		sector_t block;
  2001		int err = 0;
  2002		unsigned blocksize, bbits;
  2003		struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
  2004	
  2005		BUG_ON(!folio_test_locked(folio));
  2006		BUG_ON(from > PAGE_SIZE);
  2007		BUG_ON(to > PAGE_SIZE);
  2008		BUG_ON(from > to);
  2009	
> 2010		head = create_page_buffers(&folio->page, inode, 0, flags);
  2011		blocksize = head->b_size;
  2012		bbits = block_size_bits(blocksize);
  2013	
  2014		block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
  2015	
  2016		for(bh = head, block_start = 0; bh != head || !block_start;
  2017		    block++, block_start=block_end, bh = bh->b_this_page) {
  2018			block_end = block_start + blocksize;
  2019			if (block_end <= from || block_start >= to) {
  2020				if (folio_test_uptodate(folio)) {
  2021					if (!buffer_uptodate(bh))
  2022						set_buffer_uptodate(bh);
  2023				}
  2024				continue;
  2025			}
  2026			if (buffer_new(bh))
  2027				clear_buffer_new(bh);
  2028			if (!buffer_mapped(bh)) {
  2029				WARN_ON(bh->b_size != blocksize);
  2030				if (get_block) {
  2031					err = get_block(inode, block, bh, 1);
  2032					if (err)
  2033						break;
  2034				} else {
  2035					iomap_to_bh(inode, block, bh, iomap);
  2036				}
  2037	
  2038				if (buffer_new(bh)) {
  2039					clean_bdev_bh_alias(bh);
  2040					if (folio_test_uptodate(folio)) {
  2041						clear_buffer_new(bh);
  2042						set_buffer_uptodate(bh);
  2043						mark_buffer_dirty(bh);
  2044						continue;
  2045					}
  2046					if (block_end > to || block_start < from)
  2047						folio_zero_segments(folio,
  2048							to, block_end,
  2049							block_start, from);
  2050					continue;
  2051				}
  2052			}
  2053			if (folio_test_uptodate(folio)) {
  2054				if (!buffer_uptodate(bh))
  2055					set_buffer_uptodate(bh);
  2056				continue; 
  2057			}
  2058			if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
  2059			    !buffer_unwritten(bh) &&
  2060			     (block_start < from || block_end > to)) {
  2061				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
  2062				*wait_bh++=bh;
  2063			}
  2064		}
  2065		/*
  2066		 * If we issued read requests - let them complete.
  2067		 */
  2068		while(wait_bh > wait) {
  2069			wait_on_buffer(*--wait_bh);
  2070			if (!buffer_uptodate(*wait_bh))
  2071				err = -EIO;
  2072		}
  2073		if (unlikely(err))
  2074			page_zero_new_buffers(&folio->page, from, to);
  2075		return err;
  2076	}
  2077	
  2078	int __block_write_begin(struct page *page, loff_t pos, unsigned len,
  2079			get_block_t *get_block)
  2080	{
  2081		return __block_write_begin_int(page_folio(page), pos, len, get_block,
  2082					       NULL, 0);
  2083	}
  2084	EXPORT_SYMBOL(__block_write_begin);
  2085	
  2086	static int __block_commit_write(struct inode *inode, struct page *page,
  2087			unsigned from, unsigned to)
  2088	{
  2089		unsigned block_start, block_end;
  2090		int partial = 0;
  2091		unsigned blocksize;
  2092		struct buffer_head *bh, *head;
  2093	
  2094		bh = head = page_buffers(page);
  2095		blocksize = bh->b_size;
  2096	
  2097		block_start = 0;
  2098		do {
  2099			block_end = block_start + blocksize;
  2100			if (block_end <= from || block_start >= to) {
  2101				if (!buffer_uptodate(bh))
  2102					partial = 1;
  2103			} else {
  2104				set_buffer_uptodate(bh);
  2105				mark_buffer_dirty(bh);
  2106			}
  2107			if (buffer_new(bh))
  2108				clear_buffer_new(bh);
  2109	
  2110			block_start = block_end;
  2111			bh = bh->b_this_page;
  2112		} while (bh != head);
  2113	
  2114		/*
  2115		 * If this is a partial write which happened to make all buffers
  2116		 * uptodate then we can optimize away a bogus readpage() for
  2117		 * the next read(). Here we 'discover' whether the page went
  2118		 * uptodate as a result of this (potentially partial) write.
  2119		 */
  2120		if (!partial)
  2121			SetPageUptodate(page);
  2122		return 0;
  2123	}
  2124	
  2125	/*
  2126	 * block_write_begin takes care of the basic task of block allocation and
  2127	 * bringing partial write blocks uptodate first.
  2128	 *
  2129	 * The filesystem needs to handle block truncation upon failure.
  2130	 */
  2131	int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
  2132			unsigned flags, struct page **pagep, get_block_t *get_block)
  2133	{
  2134		pgoff_t index = pos >> PAGE_SHIFT;
  2135		struct page *page;
  2136		int status;
  2137		gfp_t gfp = 0;
  2138		bool no_wait = (flags & AOP_FLAG_NOWAIT);
  2139	
  2140		if (no_wait)
  2141			gfp = GFP_ATOMIC | __GFP_NOWARN;
  2142	
  2143		page = grab_cache_page_write_begin(mapping, index, flags);
  2144		if (!page)
  2145			return -ENOMEM;
  2146	
> 2147		status = __block_write_begin_int(page_folio(page), pos, len, get_block, NULL, gfp);
  2148		if (unlikely(status)) {
  2149			unlock_page(page);
  2150			put_page(page);
  2151			page = NULL;
  2152		}
  2153	
  2154		*pagep = page;
  2155		return status;
  2156	}
  2157	EXPORT_SYMBOL(block_write_begin);
  2158	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
diff mbox series

Patch

diff --git a/fs/buffer.c b/fs/buffer.c
index 648e1cba6da3..ae588ae4b1c1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1682,13 +1682,20 @@  static inline int block_size_bits(unsigned int blocksize)
 	return ilog2(blocksize);
 }
 
-static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
+static struct buffer_head *create_page_buffers(struct page *page,
+					struct inode *inode,
+					unsigned int b_state,
+					gfp_t flags)
 {
 	BUG_ON(!PageLocked(page));
 
-	if (!page_has_buffers(page))
-		create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
-				     b_state);
+	if (!page_has_buffers(page)) {
+		gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | flags;
+
+		__create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
+				     b_state, gfp);
+	}
+
 	return page_buffers(page);
 }
 
@@ -1734,7 +1741,7 @@  int __block_write_full_page(struct inode *inode, struct page *page,
 	int write_flags = wbc_to_write_flags(wbc);
 
 	head = create_page_buffers(page, inode,
-					(1 << BH_Dirty)|(1 << BH_Uptodate));
+					(1 << BH_Dirty)|(1 << BH_Uptodate), __GFP_NOFAIL);
 
 	/*
 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
@@ -2000,7 +2007,7 @@  int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
 	BUG_ON(to > PAGE_SIZE);
 	BUG_ON(from > to);
 
-	head = create_page_buffers(&folio->page, inode, 0);
+	head = create_page_buffers(&folio->page, inode, 0, flags);
 	blocksize = head->b_size;
 	bbits = block_size_bits(blocksize);
 
@@ -2127,12 +2134,17 @@  int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
 	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *page;
 	int status;
+	gfp_t gfp = 0;
+	bool no_wait = (flags & AOP_FLAG_NOWAIT);
+
+	if (no_wait)
+		gfp = GFP_ATOMIC | __GFP_NOWARN;
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
 		return -ENOMEM;
 
-	status = __block_write_begin_int(page_folio(page), pos, len, get_block, NULL, flags);
+	status = __block_write_begin_int(page_folio(page), pos, len, get_block, NULL, gfp);
 	if (unlikely(status)) {
 		unlock_page(page);
 		put_page(page);
@@ -2280,7 +2292,7 @@  int block_read_full_page(struct page *page, get_block_t *get_block)
 	int nr, i;
 	int fully_mapped = 1;
 
-	head = create_page_buffers(page, inode, 0);
+	head = create_page_buffers(page, inode, 0, __GFP_NOFAIL);
 	blocksize = head->b_size;
 	bbits = block_size_bits(blocksize);