
[10/75] mm/gup: Remove hpage_pincount_add()

Message ID: 20220204195852.1751729-11-willy@infradead.org
State: New
Series: MM folio patches for 5.18

Commit Message

Matthew Wilcox Feb. 4, 2022, 7:57 p.m. UTC
It's clearer to call atomic_add() in the callers; the assertions clearly
can't fire there because they're part of the condition for calling
atomic_add().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/gup.c | 33 +++++++++++----------------------
 1 file changed, 11 insertions(+), 22 deletions(-)
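
For context, here is a minimal sketch of the call-site pattern this patch produces (condensed from the hunks below; not verbatim kernel code). The hpage_pincount_available() guard already implies everything the removed VM_BUG_ON_PAGE() assertions checked (a compound head page of order > 1), so open-coding the atomic_add() loses no safety:

	/*
	 * Caller-side pattern after this patch: the guard is only true
	 * for a compound head of order > 1, which is exactly what
	 * hpage_pincount_add() used to assert before its atomic_add().
	 */
	if (hpage_pincount_available(page))
		atomic_add(refs, compound_pincount_ptr(page));
	else
		page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));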

Comments

John Hubbard Feb. 4, 2022, 9:29 p.m. UTC | #1
On 2/4/22 11:57, Matthew Wilcox (Oracle) wrote:
> It's clearer to call atomic_add() in the callers; the assertions clearly
> can't fire there because they're part of the condition for calling
> atomic_add().
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   mm/gup.c | 33 +++++++++++----------------------
>   1 file changed, 11 insertions(+), 22 deletions(-)

Looks nice.

Reviewed-by: John Hubbard <jhubbard@nvidia.com>

thanks,
John Hubbard
Christoph Hellwig Feb. 7, 2022, 7:46 a.m. UTC | #2
Looks good,

Reviewed-by: Christoph Hellwig <hch@lst.de>

Patch

diff --git a/mm/gup.c b/mm/gup.c
index 923a0d44203c..60168a09d52a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -29,14 +29,6 @@  struct follow_page_context {
 	unsigned int page_mask;
 };
 
-static void hpage_pincount_add(struct page *page, int refs)
-{
-	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
-	VM_BUG_ON_PAGE(page != compound_head(page), page);
-
-	atomic_add(refs, compound_pincount_ptr(page));
-}
-
 static void hpage_pincount_sub(struct page *page, int refs)
 {
 	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
@@ -151,17 +143,17 @@  __maybe_unused struct page *try_grab_compound_head(struct page *page,
 			return NULL;
 
 		/*
-		 * When pinning a compound page of order > 1 (which is what
-		 * hpage_pincount_available() checks for), use an exact count to
-		 * track it, via hpage_pincount_add/_sub().
+		 * When pinning a compound page of order > 1 (which is
+		 * what hpage_pincount_available() checks for), use an
+		 * exact count to track it.
 		 *
-		 * However, be sure to *also* increment the normal page refcount
-		 * field at least once, so that the page really is pinned.
-		 * That's why the refcount from the earlier
+		 * However, be sure to *also* increment the normal page
+		 * refcount field at least once, so that the page really
+		 * is pinned.  That's why the refcount from the earlier
 		 * try_get_compound_head() is left intact.
 		 */
 		if (hpage_pincount_available(page))
-			hpage_pincount_add(page, refs);
+			atomic_add(refs, compound_pincount_ptr(page));
 		else
 			page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
 
@@ -216,22 +208,19 @@  bool __must_check try_grab_page(struct page *page, unsigned int flags)
 	if (flags & FOLL_GET)
 		return try_get_page(page);
 	else if (flags & FOLL_PIN) {
-		int refs = 1;
-
 		page = compound_head(page);
 
 		if (WARN_ON_ONCE(page_ref_count(page) <= 0))
 			return false;
 
 		/*
-		 * Similar to try_grab_compound_head(): even if using the
-		 * hpage_pincount_add/_sub() routines, be sure to
-		 * *also* increment the normal page refcount field at least
-		 * once, so that the page really is pinned.
+		 * Similar to try_grab_compound_head(): be sure to *also*
+		 * increment the normal page refcount field at least once,
+		 * so that the page really is pinned.
 		 */
 		if (hpage_pincount_available(page)) {
 			page_ref_add(page, 1);
-			hpage_pincount_add(page, 1);
+			atomic_add(1, compound_pincount_ptr(page));
 		} else {
 			page_ref_add(page, GUP_PIN_COUNTING_BIAS);
 		}
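
As background for the comments in the hunks above: compound pages large enough to carry a dedicated pincount field (order > 1) count FOLL_PIN pins exactly via compound_pincount_ptr(), while smaller pages fold each pin into the ordinary refcount as a multiple of GUP_PIN_COUNTING_BIAS. A rough sketch of the reader side, modeled on page_maybe_dma_pinned() from this generation of the kernel (the _sketch name is ours; simplified, not the verbatim implementation):

	static inline bool page_maybe_dma_pinned_sketch(struct page *page)
	{
		/* Large compound pages keep an exact pin count in the head. */
		if (hpage_pincount_available(page))
			return atomic_read(compound_pincount_ptr(compound_head(page))) > 0;

		/*
		 * Small pages: each pin added GUP_PIN_COUNTING_BIAS to the
		 * refcount, so a refcount at or above the bias may indicate
		 * a pin (false positives are possible by design).
		 */
		return page_ref_count(compound_head(page)) >= GUP_PIN_COUNTING_BIAS;
	}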