diff mbox series

[33/69] fs: Introduce aops->read_folio

Message ID 20220429172556.3011843-34-willy@infradead.org (mailing list archive)
State New, archived
Headers show
Series Filesystem/page cache patches for 5.19 | expand

Commit Message

Matthew Wilcox April 29, 2022, 5:25 p.m. UTC
The ->readpage and ->read_folio operations are always called with the
same set of bits; it's only the type which differs.  Use a union to
help with the transition and convert all the callers to use ->read_folio.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/buffer.c        |  2 +-
 include/linux/fs.h |  5 ++++-
 mm/filemap.c       |  6 +++---
 mm/readahead.c     | 10 +++++-----
 4 files changed, 13 insertions(+), 10 deletions(-)

Comments

Christoph Hellwig May 3, 2022, 2:42 p.m. UTC | #1
On Fri, Apr 29, 2022 at 06:25:20PM +0100, Matthew Wilcox (Oracle) wrote:
> The ->readpage and ->read_folio operations are always called with the
> same set of bits; it's only the type which differs.  Use a union to
> help with the transition and convert all the callers to use ->read_folio.

I don't think this is going to make CFI very happy.
Kees Cook May 6, 2022, 8:22 p.m. UTC | #2
On Tue, May 03, 2022 at 07:42:12AM -0700, Christoph Hellwig wrote:
> On Fri, Apr 29, 2022 at 06:25:20PM +0100, Matthew Wilcox (Oracle) wrote:
> > The ->readpage and ->read_folio operations are always called with the
> > same set of bits; it's only the type which differs.  Use a union to
> > help with the transition and convert all the callers to use ->read_folio.
> > 
> > Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> > ---
> >  fs/buffer.c        |  2 +-
> >  include/linux/fs.h |  5 ++++-
> >  mm/filemap.c       |  6 +++---
> >  mm/readahead.c     | 10 +++++-----
> >  4 files changed, 13 insertions(+), 10 deletions(-)
> > 
> > diff --git a/fs/buffer.c b/fs/buffer.c
> > index 9737e0dbe3ec..5826ef29fe70 100644
> > --- a/fs/buffer.c
> > +++ b/fs/buffer.c
> > @@ -2824,7 +2824,7 @@ int nobh_truncate_page(struct address_space *mapping,
> >  
> >  	/* Ok, it's mapped. Make sure it's up-to-date */
> >  	if (!folio_test_uptodate(folio)) {
> > -		err = mapping->a_ops->readpage(NULL, &folio->page);
> > +		err = mapping->a_ops->read_folio(NULL, folio);
> >  		if (err) {
> >  			folio_put(folio);
> >  			goto out;
> > diff --git a/include/linux/fs.h b/include/linux/fs.h
> > index 2be852661a29..5ecc4b74204d 100644
> > --- a/include/linux/fs.h
> > +++ b/include/linux/fs.h
> > @@ -335,7 +335,10 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
> >  
> >  struct address_space_operations {
> >  	int (*writepage)(struct page *page, struct writeback_control *wbc);
> > -	int (*readpage)(struct file *, struct page *);
> > +	union {
> > +		int (*readpage)(struct file *, struct page *);
> > +		int (*read_folio)(struct file *, struct folio *);
> > +	};
> 
> I don't think this is going to make CFI very happy.

Good news is all the readpage instances are removed at the end of the
series. :)

But yes, bad news is that this breaks bisectability under CFI: at this
point in the series CFI would trap every instance of ->read_folio, since
the assigned prototype:

 static int ext4_readpage(struct file *file, struct page *page)
 ...
 static const struct address_space_operations ext4_aops = {
	.readpage		= ext4_readpage,

doesn't match the call prototype:

 int (*read_folio)(struct file *, struct folio *);
 ...
 err = mapping->a_ops->read_folio(NULL, folio);

If you want to survive this, you'll need to avoid the union and add a
wrapper to be removed when you remove ->readpage:

 struct address_space_operations {
    int (*writepage)(struct page *page, struct writeback_control *wbc);
    int (*readpage)(struct file *, struct page *);
    int (*read_folio)(struct file *, struct folio *);

 #define READ_FOLIO(ops, p, folio_or_page)	({		\
	int __rf_err;						\
	if (ops->read_folio)					\
		__rf_err = ops->read_folio(p, folio_or_page);	\
	else							\
		__rf_err = ops->readpage(p, folio_or_page);	\
	__rf_err;						\
 })
 ...
 err = READ_FOLIO(mapping->a_ops, NULL, folio);

Then only those cases that have had an appropriately matching callback
assigned to read_folio will call it.

And then you can drop the macro in "mm,fs: Remove stray references to ->readpage"

-Kees
diff mbox series

Patch

diff --git a/fs/buffer.c b/fs/buffer.c
index 9737e0dbe3ec..5826ef29fe70 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2824,7 +2824,7 @@  int nobh_truncate_page(struct address_space *mapping,
 
 	/* Ok, it's mapped. Make sure it's up-to-date */
 	if (!folio_test_uptodate(folio)) {
-		err = mapping->a_ops->readpage(NULL, &folio->page);
+		err = mapping->a_ops->read_folio(NULL, folio);
 		if (err) {
 			folio_put(folio);
 			goto out;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2be852661a29..5ecc4b74204d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -335,7 +335,10 @@  static inline bool is_sync_kiocb(struct kiocb *kiocb)
 
 struct address_space_operations {
 	int (*writepage)(struct page *page, struct writeback_control *wbc);
-	int (*readpage)(struct file *, struct page *);
+	union {
+		int (*readpage)(struct file *, struct page *);
+		int (*read_folio)(struct file *, struct folio *);
+	};
 
 	/* Write back some dirty pages from this mapping. */
 	int (*writepages)(struct address_space *, struct writeback_control *);
diff --git a/mm/filemap.c b/mm/filemap.c
index c15cfc28f9ce..132015e42384 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2419,7 +2419,7 @@  static int filemap_read_folio(struct file *file, struct address_space *mapping,
 	 */
 	folio_clear_error(folio);
 	/* Start the actual read. The read will unlock the page. */
-	error = mapping->a_ops->readpage(file, &folio->page);
+	error = mapping->a_ops->read_folio(file, folio);
 	if (error)
 		return error;
 
@@ -3447,7 +3447,7 @@  int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct address_space *mapping = file->f_mapping;
 
-	if (!mapping->a_ops->readpage)
+	if (!mapping->a_ops->read_folio)
 		return -ENOEXEC;
 	file_accessed(file);
 	vma->vm_ops = &generic_file_vm_ops;
@@ -3506,7 +3506,7 @@  static struct folio *do_read_cache_folio(struct address_space *mapping,
 		if (filler)
 			err = filler(data, &folio->page);
 		else
-			err = mapping->a_ops->readpage(data, &folio->page);
+			err = mapping->a_ops->read_folio(data, folio);
 
 		if (err < 0) {
 			folio_put(folio);
diff --git a/mm/readahead.c b/mm/readahead.c
index 947a7a1fd867..2004aa58ae24 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -15,7 +15,7 @@ 
  * explicitly requested by the application.  Readahead only ever
  * attempts to read folios that are not yet in the page cache.  If a
  * folio is present but not up-to-date, readahead will not try to read
- * it. In that case a simple ->readpage() will be requested.
+ * it. In that case a simple ->read_folio() will be requested.
  *
  * Readahead is triggered when an application read request (whether a
  * system call or a page fault) finds that the requested folio is not in
@@ -78,7 +78,7 @@ 
  * address space operation, for which mpage_readahead() is a canonical
  * implementation.  ->readahead() should normally initiate reads on all
  * folios, but may fail to read any or all folios without causing an I/O
- * error.  The page cache reading code will issue a ->readpage() request
+ * error.  The page cache reading code will issue a ->read_folio() request
  * for any folio which ->readahead() did not read, and only an error
  * from this will be final.
  *
@@ -110,7 +110,7 @@ 
  * were not fetched with readahead_folio().  This will allow a
  * subsequent synchronous readahead request to try them again.  If they
  * are left in the page cache, then they will be read individually using
- * ->readpage() which may be less efficient.
+ * ->read_folio() which may be less efficient.
  */
 
 #include <linux/kernel.h>
@@ -172,7 +172,7 @@  static void read_pages(struct readahead_control *rac)
 		}
 	} else {
 		while ((folio = readahead_folio(rac)))
-			aops->readpage(rac->file, &folio->page);
+			aops->read_folio(rac->file, folio);
 	}
 
 	blk_finish_plug(&plug);
@@ -302,7 +302,7 @@  void force_page_cache_ra(struct readahead_control *ractl,
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages, index;
 
-	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead))
+	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
 		return;
 
 	/*