diff mbox series

[v5,3/3] block: unpin user pages belonging to a folio

Message ID 20240619023420.34527-4-kundan.kumar@samsung.com (mailing list archive)
State New
Headers show
Series block: add larger order folio instead of pages | expand

Commit Message

Kundan Kumar June 19, 2024, 2:34 a.m. UTC
Unpin pages which belong to the same folio. This enables us to release folios
on I/O completion rather than looping through pages.

Introduce a function, bio_release_folio(), which helps put refs on a folio by an npages count.

Suggested-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Kundan Kumar <kundan.kumar@samsung.com>
---
 block/bio.c        | 13 ++++---------
 block/blk.h        |  7 +++++++
 include/linux/mm.h |  1 +
 mm/gup.c           | 13 +++++++++++++
 4 files changed, 25 insertions(+), 9 deletions(-)

--
2.25.1

Comments

Matthew Wilcox June 20, 2024, 3:51 a.m. UTC | #1
On Wed, Jun 19, 2024 at 08:04:20AM +0530, Kundan Kumar wrote:
>                 if (mark_dirty) {
>                         folio_lock(fi.folio);
>                         folio_mark_dirty(fi.folio);
>                         folio_unlock(fi.folio);
>                 }
> -               page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
> -               nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
> -                          fi.offset / PAGE_SIZE + 1;
> -               do {
> -                       bio_release_page(bio, page++);
> -               } while (--nr_pages != 0);
> +               bio_release_folio(bio, fi.folio, 1);

... no, call bio_release_folio(bio, fi.folio, nr_pages);

> @@ -1372,6 +1364,9 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
>                 } else
>                         bio_iov_add_folio(bio, folio, len, folio_offset);
> 
> +               if (num_pages > 1)
> +                       bio_release_folio(bio, folio, num_pages - 1);
> +

... and drop this hunk.
diff mbox series

Patch

diff --git a/block/bio.c b/block/bio.c
index 3e75b5b0eb6e..68f6de0b0a08 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1186,20 +1186,12 @@  void __bio_release_pages(struct bio *bio, bool mark_dirty)
        struct folio_iter fi;

        bio_for_each_folio_all(fi, bio) {
-               struct page *page;
-               size_t nr_pages;
-
                if (mark_dirty) {
                        folio_lock(fi.folio);
                        folio_mark_dirty(fi.folio);
                        folio_unlock(fi.folio);
                }
-               page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
-               nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
-                          fi.offset / PAGE_SIZE + 1;
-               do {
-                       bio_release_page(bio, page++);
-               } while (--nr_pages != 0);
+               bio_release_folio(bio, fi.folio, 1);
        }
 }
 EXPORT_SYMBOL_GPL(__bio_release_pages);
@@ -1372,6 +1364,9 @@  static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
                } else
                        bio_iov_add_folio(bio, folio, len, folio_offset);

+               if (num_pages > 1)
+                       bio_release_folio(bio, folio, num_pages - 1);
+
                /* Skip the pages which got added */
                i = i + (num_pages - 1);

diff --git a/block/blk.h b/block/blk.h
index d0bec44a2ffb..a657282c0e4a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -538,6 +538,13 @@  static inline void bio_release_page(struct bio *bio, struct page *page)
                unpin_user_page(page);
 }

+static inline void bio_release_folio(struct bio *bio, struct folio *folio,
+                                    unsigned long npages)
+{
+       if (bio_flagged(bio, BIO_PAGE_PINNED))
+               unpin_user_folio(folio, npages);
+}
+
 struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

 int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9849dfda44d4..b902c6c39e2b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1618,6 +1618,7 @@  void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
                                      bool make_dirty);
 void unpin_user_pages(struct page **pages, unsigned long npages);
+void unpin_user_folio(struct folio *folio, unsigned long npages);

 static inline bool is_cow_mapping(vm_flags_t flags)
 {
diff --git a/mm/gup.c b/mm/gup.c
index ca0f5cedce9b..bc96efa43d1b 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -488,6 +488,19 @@  void unpin_user_pages(struct page **pages, unsigned long npages)
 }
 EXPORT_SYMBOL(unpin_user_pages);

+/**
+ * unpin_user_folio() - release pages of a folio
+ * @folio:  pointer to folio to be released
+ * @npages: number of pages of same folio
+ *
+ * Release npages of the folio
+ */
+void unpin_user_folio(struct folio *folio, unsigned long npages)
+{
+       gup_put_folio(folio, npages, FOLL_PIN);
+}
+EXPORT_SYMBOL(unpin_user_folio);
+
 /*
  * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
  * lifecycle.  Avoid setting the bit unless necessary, or it might cause write