diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2,9 +2,9 @@
#ifndef _LINUX_MM_H
#define _LINUX_MM_H
-#include <linux/mm/devmap_managed.h>
#include <linux/mm/folio_next.h>
#include <linux/mm/folio_size.h>
+#include <linux/mm/folio_usage.h>
#include <linux/mm/page_address.h>
#include <linux/mm/page_section.h>
#include <linux/errno.h>
@@ -1074,51 +1074,6 @@ struct inode;
#include <linux/huge_mm.h>
-/*
- * Methods to modify the page usage count.
- *
- * What counts for a page usage:
- * - cache mapping (page->mapping)
- * - private data (page->private)
- * - page mapped in a task's page tables, each mapping
- * is counted separately
- *
- * Also, many kernel routines increase the page count before a critical
- * routine so they can be sure the page doesn't go away from under them.
- */
-
-/*
- * Drop a ref, return true if the refcount fell to zero (the page has no users)
- */
-static inline int put_page_testzero(struct page *page)
-{
- VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
- return page_ref_dec_and_test(page);
-}
-
-static inline int folio_put_testzero(struct folio *folio)
-{
- return put_page_testzero(&folio->page);
-}
-
-/*
- * Try to grab a ref unless the page has a refcount of zero, return false if
- * that is the case.
- * This can be called when MMU is off so it must not access
- * any of the virtual mappings.
- */
-static inline bool get_page_unless_zero(struct page *page)
-{
- return page_ref_add_unless(page, 1, 0);
-}
-
-static inline struct folio *folio_get_nontail_page(struct page *page)
-{
- if (unlikely(!get_page_unless_zero(page)))
- return NULL;
- return (struct folio *)page;
-}
-
extern int page_is_ram(unsigned long pfn);
enum {
@@ -1265,8 +1220,6 @@ static inline struct folio *virt_to_folio(const void *x)
return page_folio(page);
}
-void __folio_put(struct folio *folio);
-
void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
@@ -1355,129 +1308,6 @@ vm_fault_t finish_fault(struct vm_fault *vmf);
* back into memory.
*/
-/* 127: arbitrary random number, small enough to assemble well */
-#define folio_ref_zero_or_close_to_overflow(folio) \
- ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
-
-/**
- * folio_get - Increment the reference count on a folio.
- * @folio: The folio.
- *
- * Context: May be called in any context, as long as you know that
- * you have a refcount on the folio. If you do not already have one,
- * folio_try_get() may be the right interface for you to use.
- */
-static inline void folio_get(struct folio *folio)
-{
- VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
- folio_ref_inc(folio);
-}
-
-static inline void get_page(struct page *page)
-{
- folio_get(page_folio(page));
-}
-
-static inline __must_check bool try_get_page(struct page *page)
-{
- page = compound_head(page);
- if (WARN_ON_ONCE(page_ref_count(page) <= 0))
- return false;
- page_ref_inc(page);
- return true;
-}
-
-/**
- * folio_put - Decrement the reference count on a folio.
- * @folio: The folio.
- *
- * If the folio's reference count reaches zero, the memory will be
- * released back to the page allocator and may be used by another
- * allocation immediately. Do not access the memory or the struct folio
- * after calling folio_put() unless you can be sure that it wasn't the
- * last reference.
- *
- * Context: May be called in process or interrupt context, but not in NMI
- * context. May be called while holding a spinlock.
- */
-static inline void folio_put(struct folio *folio)
-{
- if (folio_put_testzero(folio))
- __folio_put(folio);
-}
-
-/**
- * folio_put_refs - Reduce the reference count on a folio.
- * @folio: The folio.
- * @refs: The amount to subtract from the folio's reference count.
- *
- * If the folio's reference count reaches zero, the memory will be
- * released back to the page allocator and may be used by another
- * allocation immediately. Do not access the memory or the struct folio
- * after calling folio_put_refs() unless you can be sure that these weren't
- * the last references.
- *
- * Context: May be called in process or interrupt context, but not in NMI
- * context. May be called while holding a spinlock.
- */
-static inline void folio_put_refs(struct folio *folio, int refs)
-{
- if (folio_ref_sub_and_test(folio, refs))
- __folio_put(folio);
-}
-
-void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
-
-/*
- * union release_pages_arg - an array of pages or folios
- *
- * release_pages() releases a simple array of multiple pages, and
- * accepts various different forms of said page array: either
- * a regular old boring array of pages, an array of folios, or
- * an array of encoded page pointers.
- *
- * The transparent union syntax for this kind of "any of these
- * argument types" is all kinds of ugly, so look away.
- */
-typedef union {
- struct page **pages;
- struct folio **folios;
- struct encoded_page **encoded_pages;
-} release_pages_arg __attribute__ ((__transparent_union__));
-
-void release_pages(release_pages_arg, int nr);
-
-/**
- * folios_put - Decrement the reference count on an array of folios.
- * @folios: The folios.
- *
- * Like folio_put(), but for a batch of folios. This is more efficient
- * than writing the loop yourself as it will optimise the locks which need
- * to be taken if the folios are freed. The folios batch is returned
- * empty and ready to be reused for another batch; there is no need to
- * reinitialise it.
- *
- * Context: May be called in process or interrupt context, but not in NMI
- * context. May be called while holding a spinlock.
- */
-static inline void folios_put(struct folio_batch *folios)
-{
- folios_put_refs(folios, NULL);
-}
-
-static inline void put_page(struct page *page)
-{
- struct folio *folio = page_folio(page);
-
- /*
- * For some devmap managed pages we need to catch refcount transition
- * from 2 to 1:
- */
- if (put_devmap_managed_folio_refs(folio, 1))
- return;
- folio_put(folio);
-}
-
/*
* GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
* the page's refcount so that two separate items are tracked: the original page
diff --git a/include/linux/mm/folio_usage.h b/include/linux/mm/folio_usage.h
new file mode 100644
--- /dev/null
+++ b/include/linux/mm/folio_usage.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MM_FOLIO_USAGE_H
+#define _LINUX_MM_FOLIO_USAGE_H
+
+#include <linux/mm/devmap_managed.h> // for put_devmap_managed_folio_refs()
+#include <linux/mmdebug.h> // for VM_BUG_ON_PAGE()
+#include <linux/mm_types.h> // for struct folio
+#include <linux/page_ref.h>
+
+struct folio_batch;
+
+/*
+ * Methods to modify the page usage count.
+ *
+ * What counts for a page usage:
+ * - cache mapping (page->mapping)
+ * - private data (page->private)
+ * - page mapped in a task's page tables, each mapping
+ * is counted separately
+ *
+ * Also, many kernel routines increase the page count before a critical
+ * routine so they can be sure the page doesn't go away from under them.
+ */
+
+/*
+ * Drop a ref, return true if the refcount fell to zero (the page has no users)
+ */
+static inline int put_page_testzero(struct page *page)
+{
+ VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
+ return page_ref_dec_and_test(page);
+}
+
+static inline int folio_put_testzero(struct folio *folio)
+{
+ return put_page_testzero(&folio->page);
+}
+
+/*
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
+ * This can be called when MMU is off so it must not access
+ * any of the virtual mappings.
+ */
+static inline bool get_page_unless_zero(struct page *page)
+{
+ return page_ref_add_unless(page, 1, 0);
+}
+
+static inline struct folio *folio_get_nontail_page(struct page *page)
+{
+ if (unlikely(!get_page_unless_zero(page)))
+ return NULL;
+ return (struct folio *)page;
+}
+
+void __folio_put(struct folio *folio);
+
+/* 127: arbitrary random number, small enough to assemble well */
+#define folio_ref_zero_or_close_to_overflow(folio) \
+ ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
+
+/**
+ * folio_get - Increment the reference count on a folio.
+ * @folio: The folio.
+ *
+ * Context: May be called in any context, as long as you know that
+ * you have a refcount on the folio. If you do not already have one,
+ * folio_try_get() may be the right interface for you to use.
+ */
+static inline void folio_get(struct folio *folio)
+{
+ VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
+ folio_ref_inc(folio);
+}
+
+static inline void get_page(struct page *page)
+{
+ folio_get(page_folio(page));
+}
+
+static inline __must_check bool try_get_page(struct page *page)
+{
+ page = compound_head(page);
+ if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+ return false;
+ page_ref_inc(page);
+ return true;
+}
+
+/**
+ * folio_put - Decrement the reference count on a folio.
+ * @folio: The folio.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put() unless you can be sure that it wasn't the
+ * last reference.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put(struct folio *folio)
+{
+ if (folio_put_testzero(folio))
+ __folio_put(folio);
+}
+
+/**
+ * folio_put_refs - Reduce the reference count on a folio.
+ * @folio: The folio.
+ * @refs: The amount to subtract from the folio's reference count.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put_refs() unless you can be sure that these weren't
+ * the last references.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put_refs(struct folio *folio, int refs)
+{
+ if (folio_ref_sub_and_test(folio, refs))
+ __folio_put(folio);
+}
+
+void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
+
+/*
+ * union release_pages_arg - an array of pages or folios
+ *
+ * release_pages() releases a simple array of multiple pages, and
+ * accepts various different forms of said page array: either
+ * a regular old boring array of pages, an array of folios, or
+ * an array of encoded page pointers.
+ *
+ * The transparent union syntax for this kind of "any of these
+ * argument types" is all kinds of ugly, so look away.
+ */
+typedef union {
+ struct page **pages;
+ struct folio **folios;
+ struct encoded_page **encoded_pages;
+} release_pages_arg __attribute__ ((__transparent_union__));
+
+void release_pages(release_pages_arg, int nr);
+
+/**
+ * folios_put - Decrement the reference count on an array of folios.
+ * @folios: The folios.
+ *
+ * Like folio_put(), but for a batch of folios. This is more efficient
+ * than writing the loop yourself as it will optimise the locks which need
+ * to be taken if the folios are freed. The folios batch is returned
+ * empty and ready to be reused for another batch; there is no need to
+ * reinitialise it.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folios_put(struct folio_batch *folios)
+{
+ folios_put_refs(folios, NULL);
+}
+
+static inline void put_page(struct page *page)
+{
+ struct folio *folio = page_folio(page);
+
+ /*
+ * For some devmap managed pages we need to catch refcount transition
+ * from 2 to 1:
+ */
+ if (put_devmap_managed_folio_refs(folio, 1))
+ return;
+ folio_put(folio);
+}
+
+#endif /* _LINUX_MM_FOLIO_USAGE_H */
Prepare to reduce dependencies on linux/mm.h.

This new header contains wrappers for the low-level functions from
page_ref.h.  By having those higher-level functions in a separate
header, we avoid pulling their additional dependencies into
page_ref.h.

Having these in a separate header will allow eliminating the
dependency on linux/mm.h from these headers:

- linux/skbuff.h
- linux/swap.h

Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
 include/linux/mm.h             | 172 +------------------------------
 include/linux/mm/folio_usage.h | 182 +++++++++++++++++++++++++++++++++
 2 files changed, 183 insertions(+), 171 deletions(-)
 create mode 100644 include/linux/mm/folio_usage.h
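
Not part of the patch, for reviewers only: a minimal usage sketch of the
interfaces that move into the new header, to illustrate that a consumer
of plain folio refcounting no longer needs all of linux/mm.h.  The file
and function names below are made up for illustration.

/* hypothetical-example.c - illustration only, not part of this series */
#include <linux/mm/folio_usage.h>

/*
 * Hold an extra reference across some operation.  Assumes the caller
 * already owns at least one reference, which folio_get() requires.
 */
static inline void example_hold_folio(struct folio *folio)
{
	folio_get(folio);	/* refcount++ */
	/* ... work on the folio while it cannot be freed ... */
	folio_put(folio);	/* refcount--; frees the folio if this was the last ref */
}

The same header also carries get_page()/put_page() and the batch
helpers folio_put_refs()/folios_put(), so a consumer like the above
needs nothing else from linux/mm.h for reference counting.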